Bug Summary

File: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 3043, column 15
Division by zero
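
The flagged expression is at line 3043, which lies beyond the excerpt reproduced below, so the offending code itself is not shown here. For orientation only: the core.DivideZero checker reports "Division by zero" when it can construct a feasible path on which a divisor evaluates to zero. The sketch below is a minimal, hypothetical illustration of that pattern in cost-model-style code; the names and the guard are illustrative assumptions, not the actual code from X86TargetTransformInfo.cpp.

  // Hypothetical sketch only -- not the expression at X86TargetTransformInfo.cpp:3043.
  #include <cassert>
  #include <cstdio>

  // If any caller can reach this with NumElts == 0 (for example, a type whose
  // legalization produced no vector elements), the analyzer flags the division.
  int perLaneCost(int TotalCost, int NumElts) {
    return TotalCost / NumElts; // warning: Division by zero (on the NumElts == 0 path)
  }

  // Guarding the divisor (or asserting it non-zero) makes the zero path infeasible.
  int guardedPerLaneCost(int TotalCost, int NumElts) {
    assert(NumElts != 0 && "expected a non-empty vector");
    return NumElts ? TotalCost / NumElts : TotalCost;
  }

  int main() {
    std::printf("%d\n", guardedPerLaneCost(28, 4)); // prints 7
  }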

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86TargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-01-24-223304-31662-1 -x c++ /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

1//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// X86 target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15/// A note about the cost model numbers used below: the numbers correspond to
16/// some "generic" X86 CPU rather than a concrete CPU model. Usually the
17/// numbers correspond to the CPU where the feature first appeared. For
18/// example, if we check Subtarget.hasSSE42() in the lookups below, the cost
19/// is based on Nehalem, as that was the first CPU to support that feature
20/// level and thus most likely has the worst-case cost.
21/// Some examples of other technologies/CPUs:
22/// SSE 3 - Pentium4 / Athlon64
23/// SSE 4.1 - Penryn
24/// SSE 4.2 - Nehalem
25/// AVX - Sandy Bridge
26/// AVX2 - Haswell
27/// AVX-512 - Xeon Phi / Skylake
28/// And some examples of instruction target dependent costs (latency)
29///                    divss     sqrtss    rsqrtss
30///   AMD K7           11-16     19        3
31///   Piledriver       9-24      13-15     5
32///   Jaguar           14        16        2
33///   Pentium II,III   18        30        2
34///   Nehalem          7-14      7-18      3
35///   Haswell          10-13     11        5
36/// TODO: Develop and implement the target dependent cost model and
37/// specialize cost numbers for different Cost Model Targets such as throughput,
38/// code size, latency and uop count.
39//===----------------------------------------------------------------------===//
40
41#include "X86TargetTransformInfo.h"
42#include "llvm/Analysis/TargetTransformInfo.h"
43#include "llvm/CodeGen/BasicTTIImpl.h"
44#include "llvm/CodeGen/CostTable.h"
45#include "llvm/CodeGen/TargetLowering.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/Support/Debug.h"
48
49using namespace llvm;
50
51#define DEBUG_TYPE "x86tti"
52
53//===----------------------------------------------------------------------===//
54//
55// X86 cost model.
56//
57//===----------------------------------------------------------------------===//
58
59TargetTransformInfo::PopcntSupportKind
60X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62 // TODO: Currently the __builtin_popcount() implementation using SSE3
63 // instructions is inefficient. Once the problem is fixed, we should
64 // call ST->hasSSE3() instead of ST->hasPOPCNT().
65 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66}
67
68llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69 TargetTransformInfo::CacheLevel Level) const {
70 switch (Level) {
71 case TargetTransformInfo::CacheLevel::L1D:
72 // - Penryn
73 // - Nehalem
74 // - Westmere
75 // - Sandy Bridge
76 // - Ivy Bridge
77 // - Haswell
78 // - Broadwell
79 // - Skylake
80 // - Kabylake
81 return 32 * 1024; // 32 KByte
82 case TargetTransformInfo::CacheLevel::L2D:
83 // - Penryn
84 // - Nehalem
85 // - Westmere
86 // - Sandy Bridge
87 // - Ivy Bridge
88 // - Haswell
89 // - Broadwell
90 // - Skylake
91 // - Kabylake
92 return 256 * 1024; // 256 KByte
93 }
94
95 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96}
97
98llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99 TargetTransformInfo::CacheLevel Level) const {
100 // - Penryn
101 // - Nehalem
102 // - Westmere
103 // - Sandy Bridge
104 // - Ivy Bridge
105 // - Haswell
106 // - Broadwell
107 // - Skylake
108 // - Kabylake
109 switch (Level) {
110 case TargetTransformInfo::CacheLevel::L1D:
111 LLVM_FALLTHROUGH;
112 case TargetTransformInfo::CacheLevel::L2D:
113 return 8;
114 }
115
116 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117}
118
119unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120 bool Vector = (ClassID == 1);
121 if (Vector && !ST->hasSSE1())
122 return 0;
123
124 if (ST->is64Bit()) {
125 if (Vector && ST->hasAVX512())
126 return 32;
127 return 16;
128 }
129 return 8;
130}
131
132unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
133 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
134 if (Vector) {
135 if (ST->hasAVX512() && PreferVectorWidth >= 512)
136 return 512;
137 if (ST->hasAVX() && PreferVectorWidth >= 256)
138 return 256;
139 if (ST->hasSSE1() && PreferVectorWidth >= 128)
140 return 128;
141 return 0;
142 }
143
144 if (ST->is64Bit())
145 return 64;
146
147 return 32;
148}
149
150unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
151 return getRegisterBitWidth(true);
152}
153
154unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
155 // If the loop will not be vectorized, don't interleave the loop.
156 // Let the regular unroller unroll the loop, which saves the overflow
157 // check and memory check cost.
158 if (VF == 1)
159 return 1;
160
161 if (ST->isAtom())
162 return 1;
163
164 // Sandybridge and Haswell have multiple execution ports and pipelined
165 // vector units.
166 if (ST->hasAVX())
167 return 4;
168
169 return 2;
170}
171
172int X86TTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
173 TTI::TargetCostKind CostKind,
174 TTI::OperandValueKind Op1Info,
175 TTI::OperandValueKind Op2Info,
176 TTI::OperandValueProperties Opd1PropInfo,
177 TTI::OperandValueProperties Opd2PropInfo,
178 ArrayRef<const Value *> Args,
179 const Instruction *CxtI) {
180 // TODO: Handle more cost kinds.
181 if (CostKind != TTI::TCK_RecipThroughput)
182 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
183 Op2Info, Opd1PropInfo,
184 Opd2PropInfo, Args, CxtI);
185 // Legalize the type.
186 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
187
188 int ISD = TLI->InstructionOpcodeToISD(Opcode);
189 assert(ISD && "Invalid opcode");
190
191 static const CostTblEntry GLMCostTable[] = {
192 { ISD::FDIV, MVT::f32, 18 }, // divss
193 { ISD::FDIV, MVT::v4f32, 35 }, // divps
194 { ISD::FDIV, MVT::f64, 33 }, // divsd
195 { ISD::FDIV, MVT::v2f64, 65 }, // divpd
196 };
197
198 if (ST->useGLMDivSqrtCosts())
199 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
200 LT.second))
201 return LT.first * Entry->Cost;
202
203 static const CostTblEntry SLMCostTable[] = {
204 { ISD::MUL, MVT::v4i32, 11 }, // pmulld
205 { ISD::MUL, MVT::v8i16, 2 }, // pmullw
206 { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
207 { ISD::FMUL, MVT::f64, 2 }, // mulsd
208 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
209 { ISD::FMUL, MVT::v4f32, 2 }, // mulps
210 { ISD::FDIV, MVT::f32, 17 }, // divss
211 { ISD::FDIV, MVT::v4f32, 39 }, // divps
212 { ISD::FDIV, MVT::f64, 32 }, // divsd
213 { ISD::FDIV, MVT::v2f64, 69 }, // divpd
214 { ISD::FADD, MVT::v2f64, 2 }, // addpd
215 { ISD::FSUB, MVT::v2f64, 2 }, // subpd
216 // v2i64/v4i64 mul is custom lowered as a series of long:
217 // multiplies(3), shifts(3) and adds(2)
218 // slm muldq version throughput is 2 and addq throughput 4
219 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
220 // 3X4 (addq throughput) = 17
221 { ISD::MUL, MVT::v2i64, 17 },
222 // slm addq\subq throughput is 4
223 { ISD::ADD, MVT::v2i64, 4 },
224 { ISD::SUB, MVT::v2i64, 4 },
225 };
226
227 if (ST->isSLM()) {
228 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
229 // Check if the operands can be shrunk into a smaller datatype.
230 bool Op1Signed = false;
231 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
232 bool Op2Signed = false;
233 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
234
235 bool SignedMode = Op1Signed || Op2Signed;
236 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
237
238 if (OpMinSize <= 7)
239 return LT.first * 3; // pmullw/sext
240 if (!SignedMode && OpMinSize <= 8)
241 return LT.first * 3; // pmullw/zext
242 if (OpMinSize <= 15)
243 return LT.first * 5; // pmullw/pmulhw/pshuf
244 if (!SignedMode && OpMinSize <= 16)
245 return LT.first * 5; // pmullw/pmulhw/pshuf
246 }
247
248 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
249 LT.second)) {
250 return LT.first * Entry->Cost;
251 }
252 }
253
254 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
255 ISD == ISD::UREM) &&
256 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
257 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
258 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
259 if (ISD == ISD::SDIV || ISD == ISD::SREM) {
260 // On X86, vector signed division by a power-of-two constant is
261 // normally expanded to the sequence SRA + SRL + ADD + SRA.
262 // The OperandValue properties may not be the same as that of the previous
263 // operation; conservatively assume OP_None.
264 int Cost =
265 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
266 Op2Info,
267 TargetTransformInfo::OP_None,
268 TargetTransformInfo::OP_None);
269 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
270 Op2Info,
271 TargetTransformInfo::OP_None,
272 TargetTransformInfo::OP_None);
273 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
274 Op2Info,
275 TargetTransformInfo::OP_None,
276 TargetTransformInfo::OP_None);
277
278 if (ISD == ISD::SREM) {
279 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
280 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
281 Op2Info);
282 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
283 Op2Info);
284 }
285
286 return Cost;
287 }
288
289 // Vector unsigned division/remainder will be simplified to shifts/masks.
290 if (ISD == ISD::UDIV)
291 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
292 Op1Info, Op2Info,
293 TargetTransformInfo::OP_None,
294 TargetTransformInfo::OP_None);
295
296 else // UREM
297 return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
298 Op1Info, Op2Info,
299 TargetTransformInfo::OP_None,
300 TargetTransformInfo::OP_None);
301 }
302
303 static const CostTblEntry AVX512BWUniformConstCostTable[] = {
304 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
305 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
306 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
307 };
308
309 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
310 ST->hasBWI()) {
311 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
312 LT.second))
313 return LT.first * Entry->Cost;
314 }
315
316 static const CostTblEntry AVX512UniformConstCostTable[] = {
317 { ISD::SRA, MVT::v2i64, 1 },
318 { ISD::SRA, MVT::v4i64, 1 },
319 { ISD::SRA, MVT::v8i64, 1 },
320
321 { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand.
322 { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand.
323 { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb.
324
325 { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
326 { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
327 { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
328 { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
329 };
330
331 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
332 ST->hasAVX512()) {
333 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
334 LT.second))
335 return LT.first * Entry->Cost;
336 }
337
338 static const CostTblEntry AVX2UniformConstCostTable[] = {
339 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
340 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
341 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.
342
343 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
344
345 { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
346 { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
347 { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
348 { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
349 };
350
351 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
352 ST->hasAVX2()) {
353 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
354 LT.second))
355 return LT.first * Entry->Cost;
356 }
357
358 static const CostTblEntry SSE2UniformConstCostTable[] = {
359 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
360 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
361 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
362
363 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
364 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
365 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
366
367 { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
368 { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
369 { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence
370 { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence
371 { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
372 { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
373 { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence
374 { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence
375 };
376
377 // XOP has faster vXi8 shifts.
378 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
379 ST->hasSSE2() && !ST->hasXOP()) {
380 if (const auto *Entry =
381 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
382 return LT.first * Entry->Cost;
383 }
384
385 static const CostTblEntry AVX512BWConstCostTable[] = {
386 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
387 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
388 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
389 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
390 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
391 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence
392 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
393 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence
394 };
395
396 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
397 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
398 ST->hasBWI()) {
399 if (const auto *Entry =
400 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
401 return LT.first * Entry->Cost;
402 }
403
404 static const CostTblEntry AVX512ConstCostTable[] = {
405 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
406 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
407 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
408 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
409 { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
410 { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
411 { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
412 { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
413 { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
414 { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
415 { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
416 { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
417 };
418
419 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
420 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
421 ST->hasAVX512()) {
422 if (const auto *Entry =
423 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
424 return LT.first * Entry->Cost;
425 }
426
427 static const CostTblEntry AVX2ConstCostTable[] = {
428 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
429 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
430 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
431 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
432 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
433 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence
434 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
435 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence
436 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
437 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence
438 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
439 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence
440 };
441
442 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
443 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
444 ST->hasAVX2()) {
445 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
446 return LT.first * Entry->Cost;
447 }
448
449 static const CostTblEntry SSE2ConstCostTable[] = {
450 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
451 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
452 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
453 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
454 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
455 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
456 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
457 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
458 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
459 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
460 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
461 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence
462 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
463 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
464 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
465 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence
466 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split.
467 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split.
468 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
469 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence
470 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split.
471 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split.
472 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
473 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence
474 };
475
476 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
477 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
478 ST->hasSSE2()) {
479 // pmuldq sequence.
480 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
481 return LT.first * 32;
482 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
483 return LT.first * 38;
484 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
485 return LT.first * 15;
486 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
487 return LT.first * 20;
488
489 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
490 return LT.first * Entry->Cost;
491 }
492
493 static const CostTblEntry AVX512BWShiftCostTable[] = {
494 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
495 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
496 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw
497
498 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
499 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
500 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw
501
502 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
503 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
504 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw
505 };
506
507 if (ST->hasBWI())
508 if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
509 return LT.first * Entry->Cost;
510
511 static const CostTblEntry AVX2UniformCostTable[] = {
512 // Uniform splats are cheaper for the following instructions.
513 { ISD::SHL, MVT::v16i16, 1 }, // psllw.
514 { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
515 { ISD::SRA, MVT::v16i16, 1 }, // psraw.
516 { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
517 { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
518 { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.
519 };
520
521 if (ST->hasAVX2() &&
522 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
523 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
524 if (const auto *Entry =
525 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
526 return LT.first * Entry->Cost;
527 }
528
529 static const CostTblEntry SSE2UniformCostTable[] = {
530 // Uniform splats are cheaper for the following instructions.
531 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
532 { ISD::SHL, MVT::v4i32, 1 }, // pslld
533 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
534
535 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
536 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
537 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
538
539 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
540 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
541 };
542
543 if (ST->hasSSE2() &&
544 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
545 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
546 if (const auto *Entry =
547 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
548 return LT.first * Entry->Cost;
549 }
550
551 static const CostTblEntry AVX512DQCostTable[] = {
552 { ISD::MUL, MVT::v2i64, 1 },
553 { ISD::MUL, MVT::v4i64, 1 },
554 { ISD::MUL, MVT::v8i64, 1 }
555 };
556
557 // Look for AVX512DQ lowering tricks for custom cases.
558 if (ST->hasDQI())
559 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
560 return LT.first * Entry->Cost;
561
562 static const CostTblEntry AVX512BWCostTable[] = {
563 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
564 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
565 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
566
567 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence.
568 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence.
569 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence.
570 };
571
572 // Look for AVX512BW lowering tricks for custom cases.
573 if (ST->hasBWI())
574 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
575 return LT.first * Entry->Cost;
576
577 static const CostTblEntry AVX512CostTable[] = {
578 { ISD::SHL, MVT::v16i32, 1 },
579 { ISD::SRL, MVT::v16i32, 1 },
580 { ISD::SRA, MVT::v16i32, 1 },
581
582 { ISD::SHL, MVT::v8i64, 1 },
583 { ISD::SRL, MVT::v8i64, 1 },
584
585 { ISD::SRA, MVT::v2i64, 1 },
586 { ISD::SRA, MVT::v4i64, 1 },
587 { ISD::SRA, MVT::v8i64, 1 },
588
589 { ISD::MUL, MVT::v64i8, 26 }, // extend/pmullw/trunc sequence.
590 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence.
591 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence.
592 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org)
593 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org)
594 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org)
595 { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add
596
597 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
598 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
599 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
600
601 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
602 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
603 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
604 };
605
606 if (ST->hasAVX512())
607 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
608 return LT.first * Entry->Cost;
609
610 static const CostTblEntry AVX2ShiftCostTable[] = {
611 // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
612 // custom so we can detect the cases where the shift amount is a scalar.
613 { ISD::SHL, MVT::v4i32, 1 },
614 { ISD::SRL, MVT::v4i32, 1 },
615 { ISD::SRA, MVT::v4i32, 1 },
616 { ISD::SHL, MVT::v8i32, 1 },
617 { ISD::SRL, MVT::v8i32, 1 },
618 { ISD::SRA, MVT::v8i32, 1 },
619 { ISD::SHL, MVT::v2i64, 1 },
620 { ISD::SRL, MVT::v2i64, 1 },
621 { ISD::SHL, MVT::v4i64, 1 },
622 { ISD::SRL, MVT::v4i64, 1 },
623 };
624
625 if (ST->hasAVX512()) {
626 if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
627 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
628 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
629 // On AVX512, a packed v32i16 shift left by a constant build_vector
630 // is lowered into a vector multiply (vpmullw).
631 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
632 Op1Info, Op2Info,
633 TargetTransformInfo::OP_None,
634 TargetTransformInfo::OP_None);
635 }
636
637 // Look for AVX2 lowering tricks.
638 if (ST->hasAVX2()) {
639 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
640 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
641 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
642 // On AVX2, a packed v16i16 shift left by a constant build_vector
643 // is lowered into a vector multiply (vpmullw).
644 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
645 Op1Info, Op2Info,
646 TargetTransformInfo::OP_None,
647 TargetTransformInfo::OP_None);
648
649 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
650 return LT.first * Entry->Cost;
651 }
652
653 static const CostTblEntry XOPShiftCostTable[] = {
654 // 128bit shifts take 1cy, but right shifts require negation beforehand.
655 { ISD::SHL, MVT::v16i8, 1 },
656 { ISD::SRL, MVT::v16i8, 2 },
657 { ISD::SRA, MVT::v16i8, 2 },
658 { ISD::SHL, MVT::v8i16, 1 },
659 { ISD::SRL, MVT::v8i16, 2 },
660 { ISD::SRA, MVT::v8i16, 2 },
661 { ISD::SHL, MVT::v4i32, 1 },
662 { ISD::SRL, MVT::v4i32, 2 },
663 { ISD::SRA, MVT::v4i32, 2 },
664 { ISD::SHL, MVT::v2i64, 1 },
665 { ISD::SRL, MVT::v2i64, 2 },
666 { ISD::SRA, MVT::v2i64, 2 },
667 // 256bit shifts require splitting if AVX2 didn't catch them above.
668 { ISD::SHL, MVT::v32i8, 2+2 },
669 { ISD::SRL, MVT::v32i8, 4+2 },
670 { ISD::SRA, MVT::v32i8, 4+2 },
671 { ISD::SHL, MVT::v16i16, 2+2 },
672 { ISD::SRL, MVT::v16i16, 4+2 },
673 { ISD::SRA, MVT::v16i16, 4+2 },
674 { ISD::SHL, MVT::v8i32, 2+2 },
675 { ISD::SRL, MVT::v8i32, 4+2 },
676 { ISD::SRA, MVT::v8i32, 4+2 },
677 { ISD::SHL, MVT::v4i64, 2+2 },
678 { ISD::SRL, MVT::v4i64, 4+2 },
679 { ISD::SRA, MVT::v4i64, 4+2 },
680 };
681
682 // Look for XOP lowering tricks.
683 if (ST->hasXOP()) {
684 // If the right shift is constant then we'll fold the negation so
685 // it's as cheap as a left shift.
686 int ShiftISD = ISD;
687 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
688 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
689 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
690 ShiftISD = ISD::SHL;
691 if (const auto *Entry =
692 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
693 return LT.first * Entry->Cost;
694 }
695
696 static const CostTblEntry SSE2UniformShiftCostTable[] = {
697 // Uniform splats are cheaper for the following instructions.
698 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
699 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split.
700 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split.
701
702 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
703 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split.
704 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split.
705
706 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
707 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split.
708 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle.
709 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split.
710 };
711
712 if (ST->hasSSE2() &&
713 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
714 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
715
716 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
717 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
718 return LT.first * 4; // 2*psrad + shuffle.
719
720 if (const auto *Entry =
721 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
722 return LT.first * Entry->Cost;
723 }
724
725 if (ISD == ISD::SHL &&
726 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
727 MVT VT = LT.second;
728 // Vector shift left by non uniform constant can be lowered
729 // into vector multiply.
730 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
731 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
732 ISD = ISD::MUL;
733 }
734
735 static const CostTblEntry AVX2CostTable[] = {
736 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
737 { ISD::SHL, MVT::v64i8, 22 }, // 2*vpblendvb sequence.
738 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
739 { ISD::SHL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.
740
741 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
742 { ISD::SRL, MVT::v64i8, 22 }, // 2*vpblendvb sequence.
743 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
744 { ISD::SRL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.
745
746 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
747 { ISD::SRA, MVT::v64i8, 48 }, // 2*vpblendvb sequence.
748 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
749 { ISD::SRA, MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence.
750 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
751 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence.
752
753 { ISD::SUB, MVT::v32i8, 1 }, // psubb
754 { ISD::ADD, MVT::v32i8, 1 }, // paddb
755 { ISD::SUB, MVT::v16i16, 1 }, // psubw
756 { ISD::ADD, MVT::v16i16, 1 }, // paddw
757 { ISD::SUB, MVT::v8i32, 1 }, // psubd
758 { ISD::ADD, MVT::v8i32, 1 }, // paddd
759 { ISD::SUB, MVT::v4i64, 1 }, // psubq
760 { ISD::ADD, MVT::v4i64, 1 }, // paddq
761
762 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence.
763 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence.
764 { ISD::MUL, MVT::v16i16, 1 }, // pmullw
765 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org)
766 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add
767
768 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
769 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
770 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
771 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
772 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
773 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
774
775 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
776 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
777 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
778 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
779 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
780 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
781 };
782
783 // Look for AVX2 lowering tricks for custom cases.
784 if (ST->hasAVX2())
785 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
786 return LT.first * Entry->Cost;
787
788 static const CostTblEntry AVX1CostTable[] = {
789 // We don't have to scalarize unsupported ops. We can issue two half-sized
790 // operations and we only need to extract the upper YMM half.
791 // Two ops + 1 extract + 1 insert = 4.
792 { ISD::MUL, MVT::v16i16, 4 },
793 { ISD::MUL, MVT::v8i32, 4 },
794 { ISD::SUB, MVT::v32i8, 4 },
795 { ISD::ADD, MVT::v32i8, 4 },
796 { ISD::SUB, MVT::v16i16, 4 },
797 { ISD::ADD, MVT::v16i16, 4 },
798 { ISD::SUB, MVT::v8i32, 4 },
799 { ISD::ADD, MVT::v8i32, 4 },
800 { ISD::SUB, MVT::v4i64, 4 },
801 { ISD::ADD, MVT::v4i64, 4 },
802
803 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
804 // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
805 // Because we believe v4i64 to be a legal type, we must also include the
806 // extract+insert in the cost table. Therefore, the cost here is 18
807 // instead of 8.
808 { ISD::MUL, MVT::v4i64, 18 },
809
810 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence.
811
812 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
813 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
814 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
815 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
816 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
817 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/
818 };
819
820 if (ST->hasAVX())
821 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
822 return LT.first * Entry->Cost;
823
824 static const CostTblEntry SSE42CostTable[] = {
825 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
826 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
827 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
828 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
829
830 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
831 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/
832 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
833 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
834
835 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
836 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
837 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
838 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
839
840 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
841 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
842 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
843 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
844 };
845
846 if (ST->hasSSE42())
847 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
848 return LT.first * Entry->Cost;
849
850 static const CostTblEntry SSE41CostTable[] = {
851 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence.
852 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split.
853 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence.
854 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
855 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
856 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split
857
858 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence.
859 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split.
860 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence.
861 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
862 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend.
863 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split.
864
865 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence.
866 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split.
867 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence.
868 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
869 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
870 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split.
871
872 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org)
873 };
874
875 if (ST->hasSSE41())
876 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
877 return LT.first * Entry->Cost;
878
879 static const CostTblEntry SSE2CostTable[] = {
880 // We don't correctly identify costs of casts because they are marked as
881 // custom.
882 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
883 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
884 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
885 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
886 { ISD::SHL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
887
888 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
889 { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
890 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
891 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
892 { ISD::SRL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
893
894 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
895 { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
896 { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
897 { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence.
898 { ISD::SRA, MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.
899
900 { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence.
901 { ISD::MUL, MVT::v8i16, 1 }, // pmullw
902 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
903 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add
904
905 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
906 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
907 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
908 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
909
910 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
911 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
912
913 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
914 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
915 };
916
917 if (ST->hasSSE2())
918 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
919 return LT.first * Entry->Cost;
920
921 static const CostTblEntry SSE1CostTable[] = {
922 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
923 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
924
925 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
926 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
927
928 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
929 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
930
931 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
932 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
933 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
934
935 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
936 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
937 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
938 };
939
940 if (ST->hasSSE1())
941 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
942 return LT.first * Entry->Cost;
943
944 // It is not a good idea to vectorize division. We have to scalarize it and
945 // in the process we will often end up having to spill regular
946 // registers. The overhead of division is going to dominate most kernels
947 // anyway, so try hard to prevent vectorization of division - it is
948 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
949 // to hide "20 cycles" for each lane.
950 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
951 ISD == ISD::UDIV || ISD == ISD::UREM)) {
952 int ScalarCost = getArithmeticInstrCost(
953 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
954 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
955 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
956 }
957
958 // Fallback to the default implementation.
959 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
960}
961
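Before the next function, it is worth noting the shape that every feature-level block in getArithmeticInstrCost above shares: legalize the type, look up the (opcode, legalized MVT) pair in a static cost table, and scale the per-vector entry by the legalization split factor LT.first. The stand-alone sketch below mirrors that pattern with simplified stand-ins; the struct and lookup helper are illustrative approximations, not the real CostTblEntry/CostTableLookup declared in llvm/CodeGen/CostTable.h.

  // Simplified stand-in for the table-lookup pattern used above; not LLVM code.
  #include <cstdio>
  #include <optional>

  enum Op { FDIV };
  enum SimpleVT { v4f32, v2f64 };

  struct CostEntry { Op ISD; SimpleVT VT; int Cost; };

  // Rough analogue of the SSE42CostTable FDIV rows above.
  static const CostEntry SSE42Table[] = {
      {FDIV, v4f32, 14}, // divps
      {FDIV, v2f64, 22}, // divpd
  };

  // Analogue of CostTableLookup: first matching (opcode, type) entry wins.
  std::optional<int> lookup(Op ISD, SimpleVT VT) {
    for (const CostEntry &E : SSE42Table)
      if (E.ISD == ISD && E.VT == VT)
        return E.Cost;
    return std::nullopt;
  }

  int main() {
    // LTFirst plays the role of LT.first, the legalization split factor:
    // e.g. a v8f32 FDIV on a 128-bit-only target splits into two v4f32 ops.
    int LTFirst = 2;
    if (std::optional<int> Cost = lookup(FDIV, v4f32))
      std::printf("estimated cost = %d\n", LTFirst * *Cost); // 2 * 14 = 28
    return 0;
  }

The only deviations from this pattern in the function above are the hand-written special cases (the SLM v4i32 multiply shrinking, the power-of-two division expansion, and the final scalarized-division fallback), which compute their costs recursively instead of from a table.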
962int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *BaseTp,
963 int Index, VectorType *SubTp) {
964 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
965 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
966 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
967
968 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
969 if (Kind == TTI::SK_Transpose)
970 Kind = TTI::SK_PermuteTwoSrc;
971
972 // For Broadcasts we are splatting the first element from the first input
973 // register, so only need to reference that input and all the output
974 // registers are the same.
975 if (Kind == TTI::SK_Broadcast)
976 LT.first = 1;
977
978 // Subvector extractions are free if they start at the beginning of a
979 // vector and cheap if the subvectors are aligned.
980 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
981 int NumElts = LT.second.getVectorNumElements();
982 if ((Index % NumElts) == 0)
983 return 0;
984 std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
985 if (SubLT.second.isVector()) {
986 int NumSubElts = SubLT.second.getVectorNumElements();
987 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
988 return SubLT.first;
989 // Handle some cases for widening legalization. For now we only handle
990 // cases where the original subvector was naturally aligned and evenly
991 // fit in its legalized subvector type.
992 // FIXME: Remove some of the alignment restrictions.
993 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
994 // vectors.
995 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
996 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
997 (NumSubElts % OrigSubElts) == 0 &&
998 LT.second.getVectorElementType() ==
999 SubLT.second.getVectorElementType() &&
1000 LT.second.getVectorElementType().getSizeInBits() ==
1001 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1002 assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1003        "Unexpected number of elements!");
1004 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1005 LT.second.getVectorNumElements());
1006 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1007 SubLT.second.getVectorNumElements());
1008 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1009 int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
1010 ExtractIndex, SubTy);
1011
1012 // If the original size is 32-bits or more, we can use pshufd. Otherwise
1013 // if we have SSSE3 we can use pshufb.
1014 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1015 return ExtractCost + 1; // pshufd or pshufb
1016
1017 assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1018        "Unexpected vector size");
1019
1020 return ExtractCost + 2; // worst case pshufhw + pshufd
1021 }
1022 }
1023 }
1024
1025 // Handle some common (illegal) sub-vector types as they are often very cheap
1026 // to shuffle even on targets without PSHUFB.
1027 EVT VT = TLI->getValueType(DL, BaseTp);
1028 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1029 !ST->hasSSSE3()) {
1030 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1031 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1032 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1033 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1034 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1035 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1036
1037 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1038 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1039 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1040 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1041
1042 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1043 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1044 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1045 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1046 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1047
1048 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1049 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1050 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1051 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1052 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1053 };
1054
1055 if (ST->hasSSE2())
1056 if (const auto *Entry =
1057 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1058 return Entry->Cost;
1059 }
1060
1061 // We are going to permute multiple sources and the result will be in multiple
1062 // destinations. We provide an accurate cost only for splits where the element
1063 // type remains the same.
1064 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1065 MVT LegalVT = LT.second;
1066 if (LegalVT.isVector() &&
1067 LegalVT.getVectorElementType().getSizeInBits() ==
1068 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1069 LegalVT.getVectorNumElements() <
1070 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1071
1072 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1073 unsigned LegalVTSize = LegalVT.getStoreSize();
1074 // Number of source vectors after legalization:
1075 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1076 // Number of destination vectors after legalization:
1077 unsigned NumOfDests = LT.first;
1078
1079 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1080 LegalVT.getVectorNumElements());
1081
1082 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1083 return NumOfShuffles *
1084 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
1085 }
1086
1087 return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
1088 }
1089
1090 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1091 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1092 // We assume that source and destination have the same vector type.
1093 int NumOfDests = LT.first;
1094 int NumOfShufflesPerDest = LT.first * 2 - 1;
1095 LT.first = NumOfDests * NumOfShufflesPerDest;
1096 }
1097
1098 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1099 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1100 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1101
1102 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1103 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1104
1105 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1106 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1107 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1108 };
1109
1110 if (ST->hasVBMI())
1111 if (const auto *Entry =
1112 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1113 return LT.first * Entry->Cost;
1114
1115 static const CostTblEntry AVX512BWShuffleTbl[] = {
1116 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1117 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1118
1119 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1120 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1121 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1122
1123 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1124 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1125 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1126
1127 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1128 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1129 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1130 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1131
1132 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1133 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1134 };
1135
1136 if (ST->hasBWI())
1137 if (const auto *Entry =
1138 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1139 return LT.first * Entry->Cost;
1140
1141 static const CostTblEntry AVX512ShuffleTbl[] = {
1142 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1143 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1144 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1145 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1146 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1147 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1148
1149 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1150 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1151 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1152 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1153
1154 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1155 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1156 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1157 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1158 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1159 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1160 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1161 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1162 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1163 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1164 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1165 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1166 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1167
1168 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1169 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1170 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1171 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1172 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1173 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1174 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1175 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1176 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1177 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1178 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1179 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1180
1181 // FIXME: This just applies the type legalization cost rules above
1182 // assuming these completely split.
1183 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1184 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1185 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1186 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1187
1188 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1189 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq
1190 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd
1191 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1192 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq
1193 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1194 };
1195
1196 if (ST->hasAVX512())
1197 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1198 return LT.first * Entry->Cost;
1199
1200 static const CostTblEntry AVX2ShuffleTbl[] = {
1201 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1202 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1203 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1204 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1205 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1206 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1207
1208 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1209 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1210 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1211 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1212 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1213 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1214
1215 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1216 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1217
1218 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1219 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1220 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1221 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1222 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1223 // + vpblendvb
1224 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1225 // + vpblendvb
1226
1227 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1228 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1229 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1230 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1231 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1232 // + vpblendvb
1233 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1234 // + vpblendvb
1235 };
1236
1237 if (ST->hasAVX2())
1238 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1239 return LT.first * Entry->Cost;
1240
1241 static const CostTblEntry XOPShuffleTbl[] = {
1242 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1243 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1244 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1245 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1246 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1247 // + vinsertf128
1248 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1249 // + vinsertf128
1250
1251 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1252 // + vinsertf128
1253 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1254 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1255 // + vinsertf128
1256 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1257 };
1258
1259 if (ST->hasXOP())
1260 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1261 return LT.first * Entry->Cost;
1262
1263 static const CostTblEntry AVX1ShuffleTbl[] = {
1264 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1265 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1266 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1267 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1268 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1269 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1270
1271 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1272 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1273 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1274 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1275 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1276 // + vinsertf128
1277 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1278 // + vinsertf128
1279
1280 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1281 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1282 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1283 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1284 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1285 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1286
1287 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1288 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1289 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1290 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1291 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1292 // + 2*por + vinsertf128
1293 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1294 // + 2*por + vinsertf128
1295
1296 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1297 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1298 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1299 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1300 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1301 // + 4*por + vinsertf128
1302 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1303 // + 4*por + vinsertf128
1304 };
1305
1306 if (ST->hasAVX())
1307 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1308 return LT.first * Entry->Cost;
1309
1310 static const CostTblEntry SSE41ShuffleTbl[] = {
1311 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1312 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1313 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1314 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1315 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1316 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1317 };
1318
1319 if (ST->hasSSE41())
1320 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1321 return LT.first * Entry->Cost;
1322
1323 static const CostTblEntry SSSE3ShuffleTbl[] = {
1324 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1325 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1326
1327 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1328 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1329
1330 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1331 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1332
1333 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1334 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1335
1336 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1337 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1338 };
1339
1340 if (ST->hasSSSE3())
1341 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1342 return LT.first * Entry->Cost;
1343
1344 static const CostTblEntry SSE2ShuffleTbl[] = {
1345 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1346 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1347 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1348 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1349 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1350
1351 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1352 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1353 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1354 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1355 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1356 // + 2*pshufd + 2*unpck + packus
1357
1358 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1359 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1360 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1361 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1362 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1363
1364 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1365 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1366 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1367 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1368 // + pshufd/unpck
1369 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1370 // + 2*pshufd + 2*unpck + 2*packus
1371
1372 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
1373 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
1374 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
1375 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
1376 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
1377 };
1378
1379 if (ST->hasSSE2())
1380 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1381 return LT.first * Entry->Cost;
1382
1383 static const CostTblEntry SSE1ShuffleTbl[] = {
1384 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
1385 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
1386 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
1387 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1388 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
1389 };
1390
1391 if (ST->hasSSE1())
1392 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1393 return LT.first * Entry->Cost;
1394
1395 return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
1396}
1397
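// ---------------------------------------------------------------------------
// Editorial sketch (not part of X86TargetTransformInfo.cpp): every lookup in
// getShuffleCost above follows the same pattern - legalize the IR vector type,
// find an entry for (shuffle kind, legalized MVT) in the most specific ISA
// table the subtarget supports, and scale that entry by LT.first, the number
// of legal registers the original type splits into. The standalone program
// below re-implements that flow with hypothetical names (MiniCostEntry,
// lookupShuffleCost, MiniTbl); it is only an illustration, not LLVM's
// CostTblEntry/CostTableLookup machinery.
#include <cstdio>

namespace sketch {

enum ShuffleKind { SK_Broadcast, SK_Reverse, SK_Select };
enum SimpleVT { v4f32 };

struct MiniCostEntry {
  ShuffleKind Kind;
  SimpleVT Ty;
  int Cost; // reciprocal-throughput cost of one legalized shuffle
};

// A toy table in the spirit of SSE1ShuffleTbl above.
static const MiniCostEntry MiniTbl[] = {
    {SK_Broadcast, v4f32, 1}, // shufps
    {SK_Reverse, v4f32, 1},   // shufps
    {SK_Select, v4f32, 2},    // 2*shufps
};

// NumLegalParts plays the role of LT.first: how many legal registers the
// original IR vector was split into during type legalization.
int lookupShuffleCost(ShuffleKind Kind, SimpleVT LegalTy, int NumLegalParts) {
  for (const MiniCostEntry &E : MiniTbl)
    if (E.Kind == Kind && E.Ty == LegalTy)
      return NumLegalParts * E.Cost; // mirrors "LT.first * Entry->Cost"
  return -1; // the real code falls back to BaseT::getShuffleCost instead
}

} // namespace sketch

int main() {
  // An IR <8 x float> reverse on an SSE1-only target legalizes into two v4f32
  // halves (LT.first == 2), so the sketch reports 2 * 1 = 2, matching how the
  // real cost model simply scales the per-register cost by the split count.
  std::printf("reverse cost: %d\n",
              sketch::lookupShuffleCost(sketch::SK_Reverse, sketch::v4f32, 2));
  return 0;
}
// ---------------------------------------------------------------------------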
1398int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1399 TTI::CastContextHint CCH,
1400 TTI::TargetCostKind CostKind,
1401 const Instruction *I) {
1402 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1403 assert(ISD && "Invalid opcode");
1404
1405 // TODO: Allow non-throughput costs that aren't binary.
1406 auto AdjustCost = [&CostKind](int Cost) {
1407 if (CostKind != TTI::TCK_RecipThroughput)
1408 return Cost == 0 ? 0 : 1;
1409 return Cost;
1410 };
1411
1412 // FIXME: Need a better design of the cost table to handle non-simple types of
1413 // potential massive combinations (elem_num x src_type x dst_type).
1414
1415 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1416 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1417 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1418
1419 // Mask sign extend has an instruction.
1420 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1421 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1422 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1423 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1424 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1425 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1426 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1427 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1428 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1429 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1430 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
1431
1432 // Mask zero extend is a sext + shift.
1433 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1434 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1435 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1436 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1437 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1438 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1439 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1440 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1441 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1442 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1443 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
1444
1445 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
1446 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
1447 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
1448 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
1449 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
1450 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
1451 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
1452 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
1453 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
1454 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
1455 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
1456 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
1457 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
1458 };
1459
1460 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1461 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1462 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1463
1464 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1465 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1466
1467 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
1468 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
1469
1470 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
1471 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
1472 };
1473
1474 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1475 // 256-bit wide vectors.
1476
1477 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1478 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
1479 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
1480 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
1481
1482 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1483 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1484 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1485 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
1486 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1487 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1488 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1489 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1490 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
1491 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
1492 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
1493 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
1494 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
1495 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
1496 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
1497 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 },
1498 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 },
1499 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 },
1500 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 },
1501 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
1502 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
1503 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1504
1505 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
1506 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
1507
1508 // Sign extend is zmm vpternlogd+vptruncdb.
1509 // Zero extend is zmm broadcast load+vptruncdw.
1510 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
1511 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
1512 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
1513 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
1514 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
1515 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
1516 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
1517 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
1518
1519 // Sign extend is zmm vpternlogd+vptruncdw.
1520 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1521 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
1522 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1523 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
1524 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1525 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
1526 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1527 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
1528 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1529
1530 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
1531 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
1532 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
1533 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
1534 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
1535 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
1536 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
1537 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
1538 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
1539 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
1540
1541 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
1542 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
1543 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
1544 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
1545
1546 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1547 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1548 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1549 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1550 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1551 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1552 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1553 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1554 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1555 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1556
1557 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1558 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1559
1560 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1561 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1562 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1563 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1564 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1565 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1566 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1567 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1568
1569 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1570 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1571 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1572 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1573 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1574 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1575 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1576 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1577 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
1578 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
1579
1580 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f64, 3 },
1581 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
1582 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 3 },
1583 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 3 },
1584
1585 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1586 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
1587 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
1588 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
1589 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
1590 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
1591 };
1592
1593 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1594 // Mask sign extend has an instruction.
1595 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1596 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1597 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1598 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1599 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1600 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1601 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1602 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1603 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1604
1605 // Mask zero extend is a sext + shift.
1606 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1607 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1608 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1609 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1610 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1611 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1612 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1613 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1614 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1615
1616 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
1617 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
1618 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
1619 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
1620 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
1621 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
1622 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
1623 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
1624 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
1625 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
1626 };
1627
1628 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1629 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1630 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1631 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1632 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1633
1634 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1635 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1636 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1637 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1638
1639 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
1640 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
1641 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1642 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
1643
1644 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
1645 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
1646 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1647 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
1648 };
1649
1650 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1651 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1652 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1653 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1654 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
1655 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1656 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1657 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1658 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
1659 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
1660 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
1661 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
1662 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
1663 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
1664 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
1665
1666 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1667 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1668 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
1669 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
1670 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
1671 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
1672 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
1673 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
1674 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
1675 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
1676
1677 // sign extend is vpcmpeq+maskedmove+vpmovdw
1678 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1679 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1680 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
1681 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1682 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
1683 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1684 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
1685 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1686 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1687
1688 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
1689 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
1690 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
1691 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
1692 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
1693 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
1694 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
1695 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
1696 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
1697 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
1698
1699 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
1700 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1701 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
1702 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
1703 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1704 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1705 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
1706 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
1707 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1708 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1709 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1710 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
1711 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1712 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
1713
1714 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
1715 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
1716
1717 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 3 },
1718 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 3 },
1719
1720 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
1721 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
1722
1723 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1724 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1725 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 1 },
1726 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
1727 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
1728 };
1729
1730 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1731 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1732 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1733 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1734 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1735 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
1736 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
1737 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
1738 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
1739 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1740 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1741 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1742 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1743 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
1744 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
1745 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1746 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1747 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1748 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1749 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1750 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1751
1752 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1753 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
1754
1755 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
1756 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
1757 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
1758 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
1759
1760 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
1761 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
1762
1763 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
1764 };
1765
1766 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1767 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1768 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1769 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1770 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1771 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1772 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1773 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1774 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1775 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1776 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1777 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1778 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1779 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 4 },
1780 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1781 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1782 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1783 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1784 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1785
1786 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
1787 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
1788 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
1789 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
1790 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
1791
1792 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
1793 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1794 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1795 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
1796 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
1797 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1798 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 11 },
1799 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 9 },
1800 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
1801 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 11 },
1802
1803 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
1804 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
1805 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
1806 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
1807 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
1808 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
1809 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
1810 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
1811 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1812 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1813 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1814 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1815
1816 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
1817 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
1818 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
1819 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
1820 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1821 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
1822 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1823 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1824 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1825 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
1826 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
1827 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
1828 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
1829 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1830 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 },
1831 // The generic code to compute the scalar overhead is currently broken.
1832 // Workaround this limitation by estimating the scalarization overhead
1833 // here. We have roughly 10 instructions per scalar element.
1834 // Multiply that by the vector width.
1835 // FIXME: remove that when PR19268 is fixed.
1836 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1837 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1838
1839 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 4 },
1840 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f64, 3 },
1841 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f64, 2 },
1842 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 3 },
1843
1844 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f64, 3 },
1845 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f64, 2 },
1846 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 4 },
1847 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 3 },
1848 // This node is expanded into scalarized operations but BasicTTI is overly
1849 // optimistic estimating its cost. It computes 3 per element (one
1850 // vector-extract, one scalar conversion and one vector-insert). The
1851 // problem is that the inserts form a read-modify-write chain so latency
1852 // should be factored in too. Inflating the cost per element by 1.
1853 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
1854 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
1855
1856 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
1857 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
1858 };
1859
1860 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1861 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1862 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1863 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1864 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1865 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1866 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1867
1868 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1869 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
1870 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1871 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1872 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1873 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1874 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1875 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1876 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1877 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1878 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1879 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1880 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1881 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1882 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1883 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1884 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1885 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1886
1887 // These truncates end up widening elements.
1888 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ
1889 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ
1890 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD
1891
1892 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 1 },
1893 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 1 },
1894 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
1895 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
1896 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
1897 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
1898 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
1899 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
1900 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1 }, // PSHUFB
1901
1902 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
1903 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
1904
1905 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 3 },
1906 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 3 },
1907
1908 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 3 },
1909 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 3 },
1910 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
1911 };
1912
1913 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
1914 // These are somewhat magic numbers justified by looking at the output of
1915 // Intel's IACA, running some kernels and making sure when we take
1916 // legalization into account the throughput will be overestimated.
1917 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1918 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1919 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1920 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1921 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
1922 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
1923 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
1924 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1925 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
1926
1927 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1928 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1929 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1930 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1931 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1932 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
1933 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
1934 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1935
1936 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 4 },
1937 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 },
1938 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
1939 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
1940 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
1941 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 4 },
1942
1943 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 },
1944
1945 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 },
1946 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },
1947
1948 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
1949 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
1950 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 4 },
1951 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 4 },
1952 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
1953 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 },
1954 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
1955 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 },
1956
1957 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1958 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
1959 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
1960 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
1961 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1962 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
1963 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1964 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
1965 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1966 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1967 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1968 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1969 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
1970 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
1971 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1972 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
1973 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1974 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
1975 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1976 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1977 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
1978 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
1979 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1980 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
1981
1982 // These truncates are really widening elements.
1983 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
1984 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
1985 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
1986 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
1987 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
1988 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
1989
1990 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // PAND+PACKUSWB
1991 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // PAND+PACKUSWB
1992 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
1993 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
1994 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 3 }, // PAND+2*PACKUSWB
1995 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
1996 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
1997 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
1998 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1999 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
2000 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2001 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
2002 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
2003 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
2004 { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1 }, // PSHUFD
2005 };
2006
2007 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2008 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
2009
2010 if (ST->hasSSE2() && !ST->hasAVX()) {
2011 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2012 LTDest.second, LTSrc.second))
2013 return AdjustCost(LTSrc.first * Entry->Cost);
2014 }
2015
2016 EVT SrcTy = TLI->getValueType(DL, Src);
2017 EVT DstTy = TLI->getValueType(DL, Dst);
2018
2019 // The function getSimpleVT only handles simple value types.
2020 if (!SrcTy.isSimple() || !DstTy.isSimple())
2021 return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind));
2022
2023 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2024 MVT SimpleDstTy = DstTy.getSimpleVT();
2025
2026 if (ST->useAVX512Regs()) {
2027 if (ST->hasBWI())
2028 if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
2029 SimpleDstTy, SimpleSrcTy))
2030 return AdjustCost(Entry->Cost);
2031
2032 if (ST->hasDQI())
2033 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
2034 SimpleDstTy, SimpleSrcTy))
2035 return AdjustCost(Entry->Cost);
2036
2037 if (ST->hasAVX512())
2038 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
2039 SimpleDstTy, SimpleSrcTy))
2040 return AdjustCost(Entry->Cost);
2041 }
2042
2043 if (ST->hasBWI())
2044 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2045 SimpleDstTy, SimpleSrcTy))
2046 return AdjustCost(Entry->Cost);
2047
2048 if (ST->hasDQI())
2049 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2050 SimpleDstTy, SimpleSrcTy))
2051 return AdjustCost(Entry->Cost);
2052
2053 if (ST->hasAVX512())
2054 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2055 SimpleDstTy, SimpleSrcTy))
2056 return AdjustCost(Entry->Cost);
2057
2058 if (ST->hasAVX2()) {
2059 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2060 SimpleDstTy, SimpleSrcTy))
2061 return AdjustCost(Entry->Cost);
2062 }
2063
2064 if (ST->hasAVX()) {
2065 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2066 SimpleDstTy, SimpleSrcTy))
2067 return AdjustCost(Entry->Cost);
2068 }
2069
2070 if (ST->hasSSE41()) {
2071 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2072 SimpleDstTy, SimpleSrcTy))
2073 return AdjustCost(Entry->Cost);
2074 }
2075
2076 if (ST->hasSSE2()) {
2077 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2078 SimpleDstTy, SimpleSrcTy))
2079 return AdjustCost(Entry->Cost);
2080 }
2081
2082 return AdjustCost(
2083 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2084}
2085
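// ---------------------------------------------------------------------------
// Editorial sketch (not part of X86TargetTransformInfo.cpp): getCastInstrCost
// above walks the conversion tables from the most specific feature set to the
// least (AVX512BW -> AVX512DQ -> AVX512F -> AVX2 -> AVX -> SSE4.1 -> SSE2) and
// returns the first hit, after AdjustCost collapses any non-throughput cost
// kind to 0 or 1. The sketch below shows that dispatch with hypothetical names
// (Features, castCost); the per-ISA costs 1/2/6 are the sext v8i8 -> v8i32
// entries from the AVX2, SSE4.1 and SSE2 tables above, while the fallback of 8
// is an arbitrary stand-in for BaseT::getCastInstrCost.
#include <cstdio>

namespace sketch {

enum class CostKind { RecipThroughput, CodeSize };

// Mirrors the AdjustCost lambda: only reciprocal-throughput queries keep the
// table value; every other cost kind is reduced to 0 ("free") or 1.
int adjustCost(int Cost, CostKind Kind) {
  if (Kind != CostKind::RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

struct Features { bool HasAVX2, HasSSE41, HasSSE2; };

// Per-ISA cost of one sample conversion, sext v8i8 -> v8i32.
int avx2Cost()  { return 1; }
int sse41Cost() { return 2; }
int sse2Cost()  { return 6; }

// Most specific feature set wins, exactly like the chain of
// ConvertCostTableLookup calls in getCastInstrCost.
int castCost(const Features &ST, CostKind Kind) {
  if (ST.HasAVX2)
    return adjustCost(avx2Cost(), Kind);
  if (ST.HasSSE41)
    return adjustCost(sse41Cost(), Kind);
  if (ST.HasSSE2)
    return adjustCost(sse2Cost(), Kind);
  return adjustCost(/*generic fallback, made up for the sketch*/ 8, Kind);
}

} // namespace sketch

int main() {
  sketch::Features SSE41Only{false, true, true};
  std::printf("sext v8i8->v8i32 throughput cost: %d\n",
              sketch::castCost(SSE41Only, sketch::CostKind::RecipThroughput));
  return 0;
}
// ---------------------------------------------------------------------------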
2086int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
2087 CmpInst::Predicate VecPred,
2088 TTI::TargetCostKind CostKind,
2089 const Instruction *I) {
2090 // TODO: Handle other cost kinds.
2091 if (CostKind != TTI::TCK_RecipThroughput)
2092 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2093 I);
2094
2095 // Legalize the type.
2096 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2097
2098 MVT MTy = LT.second;
2099
2100 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2101 assert(ISD && "Invalid opcode");
2102
2103 unsigned ExtraCost = 0;
2104 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2105 // Some vector comparison predicates cost extra instructions.
2106 if (MTy.isVector() &&
2107 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2108 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2109 ST->hasBWI())) {
2110 switch (cast<CmpInst>(I)->getPredicate()) {
2111 case CmpInst::Predicate::ICMP_NE:
2112 // xor(cmpeq(x,y),-1)
2113 ExtraCost = 1;
2114 break;
2115 case CmpInst::Predicate::ICMP_SGE:
2116 case CmpInst::Predicate::ICMP_SLE:
2117 // xor(cmpgt(x,y),-1)
2118 ExtraCost = 1;
2119 break;
2120 case CmpInst::Predicate::ICMP_ULT:
2121 case CmpInst::Predicate::ICMP_UGT:
2122 // cmpgt(xor(x,signbit),xor(y,signbit))
2123 // xor(cmpeq(pmaxu(x,y),x),-1)
2124 ExtraCost = 2;
2125 break;
2126 case CmpInst::Predicate::ICMP_ULE:
2127 case CmpInst::Predicate::ICMP_UGE:
2128 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2129 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2130 // cmpeq(psubus(x,y),0)
2131 // cmpeq(pminu(x,y),x)
2132 ExtraCost = 1;
2133 } else {
2134 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2135 ExtraCost = 3;
2136 }
2137 break;
2138 default:
2139 break;
2140 }
2141 }
2142 }
2143
2144 static const CostTblEntry SLMCostTbl[] = {
2145 // slm pcmpeq/pcmpgt throughput is 2
2146 { ISD::SETCC, MVT::v2i64, 2 },
2147 };
2148
2149 static const CostTblEntry AVX512BWCostTbl[] = {
2150 { ISD::SETCC, MVT::v32i16, 1 },
2151 { ISD::SETCC, MVT::v64i8, 1 },
2152
2153 { ISD::SELECT, MVT::v32i16, 1 },
2154 { ISD::SELECT, MVT::v64i8, 1 },
2155 };
2156
2157 static const CostTblEntry AVX512CostTbl[] = {
2158 { ISD::SETCC, MVT::v8i64, 1 },
2159 { ISD::SETCC, MVT::v16i32, 1 },
2160 { ISD::SETCC, MVT::v8f64, 1 },
2161 { ISD::SETCC, MVT::v16f32, 1 },
2162
2163 { ISD::SELECT, MVT::v8i64, 1 },
2164 { ISD::SELECT, MVT::v16i32, 1 },
2165 { ISD::SELECT, MVT::v8f64, 1 },
2166 { ISD::SELECT, MVT::v16f32, 1 },
2167
2168 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
2169 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4
2170
2171 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
2172 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
2173 };
2174
2175 static const CostTblEntry AVX2CostTbl[] = {
2176 { ISD::SETCC, MVT::v4i64, 1 },
2177 { ISD::SETCC, MVT::v8i32, 1 },
2178 { ISD::SETCC, MVT::v16i16, 1 },
2179 { ISD::SETCC, MVT::v32i8, 1 },
2180
2181 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
2182 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
2183 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
2184 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
2185 };
2186
2187 static const CostTblEntry AVX1CostTbl[] = {
2188 { ISD::SETCC, MVT::v4f64, 1 },
2189 { ISD::SETCC, MVT::v8f32, 1 },
2190 // AVX1 does not support 8-wide integer compare.
2191 { ISD::SETCC, MVT::v4i64, 4 },
2192 { ISD::SETCC, MVT::v8i32, 4 },
2193 { ISD::SETCC, MVT::v16i16, 4 },
2194 { ISD::SETCC, MVT::v32i8, 4 },
2195
2196 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
2197 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
2198 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
2199 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
2200 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
2201 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
2202 };
2203
2204 static const CostTblEntry SSE42CostTbl[] = {
2205 { ISD::SETCC, MVT::v2f64, 1 },
2206 { ISD::SETCC, MVT::v4f32, 1 },
2207 { ISD::SETCC, MVT::v2i64, 1 },
2208 };
2209
2210 static const CostTblEntry SSE41CostTbl[] = {
2211 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
2212 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
2213 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
2214 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
2215 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
2216 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
2217 };
2218
2219 static const CostTblEntry SSE2CostTbl[] = {
2220 { ISD::SETCC, MVT::v2f64, 2 },
2221 { ISD::SETCC, MVT::f64, 1 },
2222 { ISD::SETCC, MVT::v2i64, 8 },
2223 { ISD::SETCC, MVT::v4i32, 1 },
2224 { ISD::SETCC, MVT::v8i16, 1 },
2225 { ISD::SETCC, MVT::v16i8, 1 },
2226
2227 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
2228 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
2229 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
2230 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
2231 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
2232 };
2233
2234 static const CostTblEntry SSE1CostTbl[] = {
2235 { ISD::SETCC, MVT::v4f32, 2 },
2236 { ISD::SETCC, MVT::f32, 1 },
2237
2238 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
2239 };
2240
2241 if (ST->isSLM())
2242 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2243 return LT.first * (ExtraCost + Entry->Cost);
2244
2245 if (ST->hasBWI())
2246 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2247 return LT.first * (ExtraCost + Entry->Cost);
2248
2249 if (ST->hasAVX512())
2250 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2251 return LT.first * (ExtraCost + Entry->Cost);
2252
2253 if (ST->hasAVX2())
2254 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2255 return LT.first * (ExtraCost + Entry->Cost);
2256
2257 if (ST->hasAVX())
2258 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2259 return LT.first * (ExtraCost + Entry->Cost);
2260
2261 if (ST->hasSSE42())
2262 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2263 return LT.first * (ExtraCost + Entry->Cost);
2264
2265 if (ST->hasSSE41())
2266 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2267 return LT.first * (ExtraCost + Entry->Cost);
2268
2269 if (ST->hasSSE2())
2270 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2271 return LT.first * (ExtraCost + Entry->Cost);
2272
2273 if (ST->hasSSE1())
2274 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2275 return LT.first * (ExtraCost + Entry->Cost);
2276
2277 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2278}
2279
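// ---------------------------------------------------------------------------
// Editorial sketch (not part of X86TargetTransformInfo.cpp): the ExtraCost
// bump for unsigned predicates in getCmpSelInstrCost above reflects that,
// before AVX-512, x86 only has signed vector compares, so ICMP_UGT/ULT are
// emulated by flipping the sign bit of both operands and using a signed
// compare - the "cmpgt(xor(x,signbit),xor(y,signbit))" note in the switch.
// A minimal SSE2 intrinsics illustration for v4i32; the helper name
// cmpgt_epu32_sse2 is ours, not an LLVM or Intel intrinsic.
#include <climits>      // INT_MIN: the 0x80000000 sign-bit pattern
#include <emmintrin.h>  // SSE2 intrinsics

// Result lanes are all-ones where a > b as *unsigned* 32-bit values. The two
// extra XORs on top of the single signed compare correspond to ExtraCost = 2.
static inline __m128i cmpgt_epu32_sse2(__m128i a, __m128i b) {
  const __m128i SignBit = _mm_set1_epi32(INT_MIN); // 0x80000000 in each lane
  __m128i as = _mm_xor_si128(a, SignBit);          // bias a into signed order
  __m128i bs = _mm_xor_si128(b, SignBit);          // bias b into signed order
  return _mm_cmpgt_epi32(as, bs); // signed compare now gives unsigned order
}
// ---------------------------------------------------------------------------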
2280unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2281
2282int X86TTIImpl::getTypeBasedIntrinsicInstrCost(
2283 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) {
2284
2285 // Costs should match the codegen from:
2286 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2287 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2288 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2289 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2290 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2291
2292 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2293 // specialized in these tables yet.
2294 static const CostTblEntry AVX512CDCostTbl[] = {
2295 { ISD::CTLZ, MVT::v8i64, 1 },
2296 { ISD::CTLZ, MVT::v16i32, 1 },
2297 { ISD::CTLZ, MVT::v32i16, 8 },
2298 { ISD::CTLZ, MVT::v64i8, 20 },
2299 { ISD::CTLZ, MVT::v4i64, 1 },
2300 { ISD::CTLZ, MVT::v8i32, 1 },
2301 { ISD::CTLZ, MVT::v16i16, 4 },
2302 { ISD::CTLZ, MVT::v32i8, 10 },
2303 { ISD::CTLZ, MVT::v2i64, 1 },
2304 { ISD::CTLZ, MVT::v4i32, 1 },
2305 { ISD::CTLZ, MVT::v8i16, 4 },
2306 { ISD::CTLZ, MVT::v16i8, 4 },
2307 };
2308 static const CostTblEntry AVX512BWCostTbl[] = {
2309 { ISD::ABS, MVT::v32i16, 1 },
2310 { ISD::ABS, MVT::v64i8, 1 },
2311 { ISD::BITREVERSE, MVT::v8i64, 5 },
2312 { ISD::BITREVERSE, MVT::v16i32, 5 },
2313 { ISD::BITREVERSE, MVT::v32i16, 5 },
2314 { ISD::BITREVERSE, MVT::v64i8, 5 },
2315 { ISD::CTLZ, MVT::v8i64, 23 },
2316 { ISD::CTLZ, MVT::v16i32, 22 },
2317 { ISD::CTLZ, MVT::v32i16, 18 },
2318 { ISD::CTLZ, MVT::v64i8, 17 },
2319 { ISD::CTPOP, MVT::v8i64, 7 },
2320 { ISD::CTPOP, MVT::v16i32, 11 },
2321 { ISD::CTPOP, MVT::v32i16, 9 },
2322 { ISD::CTPOP, MVT::v64i8, 6 },
2323 { ISD::CTTZ, MVT::v8i64, 10 },
2324 { ISD::CTTZ, MVT::v16i32, 14 },
2325 { ISD::CTTZ, MVT::v32i16, 12 },
2326 { ISD::CTTZ, MVT::v64i8, 9 },
2327 { ISD::SADDSAT, MVT::v32i16, 1 },
2328 { ISD::SADDSAT, MVT::v64i8, 1 },
2329 { ISD::SMAX, MVT::v32i16, 1 },
2330 { ISD::SMAX, MVT::v64i8, 1 },
2331 { ISD::SMIN, MVT::v32i16, 1 },
2332 { ISD::SMIN, MVT::v64i8, 1 },
2333 { ISD::SSUBSAT, MVT::v32i16, 1 },
2334 { ISD::SSUBSAT, MVT::v64i8, 1 },
2335 { ISD::UADDSAT, MVT::v32i16, 1 },
2336 { ISD::UADDSAT, MVT::v64i8, 1 },
2337 { ISD::UMAX, MVT::v32i16, 1 },
2338 { ISD::UMAX, MVT::v64i8, 1 },
2339 { ISD::UMIN, MVT::v32i16, 1 },
2340 { ISD::UMIN, MVT::v64i8, 1 },
2341 { ISD::USUBSAT, MVT::v32i16, 1 },
2342 { ISD::USUBSAT, MVT::v64i8, 1 },
2343 };
2344 static const CostTblEntry AVX512CostTbl[] = {
2345 { ISD::ABS, MVT::v8i64, 1 },
2346 { ISD::ABS, MVT::v16i32, 1 },
2347 { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split
2348 { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split
2349 { ISD::ABS, MVT::v4i64, 1 },
2350 { ISD::ABS, MVT::v2i64, 1 },
2351 { ISD::BITREVERSE, MVT::v8i64, 36 },
2352 { ISD::BITREVERSE, MVT::v16i32, 24 },
2353 { ISD::BITREVERSE, MVT::v32i16, 10 },
2354 { ISD::BITREVERSE, MVT::v64i8, 10 },
2355 { ISD::CTLZ, MVT::v8i64, 29 },
2356 { ISD::CTLZ, MVT::v16i32, 35 },
2357 { ISD::CTLZ, MVT::v32i16, 28 },
2358 { ISD::CTLZ, MVT::v64i8, 18 },
2359 { ISD::CTPOP, MVT::v8i64, 16 },
2360 { ISD::CTPOP, MVT::v16i32, 24 },
2361 { ISD::CTPOP, MVT::v32i16, 18 },
2362 { ISD::CTPOP, MVT::v64i8, 12 },
2363 { ISD::CTTZ, MVT::v8i64, 20 },
2364 { ISD::CTTZ, MVT::v16i32, 28 },
2365 { ISD::CTTZ, MVT::v32i16, 24 },
2366 { ISD::CTTZ, MVT::v64i8, 18 },
2367 { ISD::SMAX, MVT::v8i64, 1 },
2368 { ISD::SMAX, MVT::v16i32, 1 },
2369 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split
2370 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split
2371 { ISD::SMAX, MVT::v4i64, 1 },
2372 { ISD::SMAX, MVT::v2i64, 1 },
2373 { ISD::SMIN, MVT::v8i64, 1 },
2374 { ISD::SMIN, MVT::v16i32, 1 },
2375 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split
2376 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split
2377 { ISD::SMIN, MVT::v4i64, 1 },
2378 { ISD::SMIN, MVT::v2i64, 1 },
2379 { ISD::UMAX, MVT::v8i64, 1 },
2380 { ISD::UMAX, MVT::v16i32, 1 },
2381 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split
2382 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split
2383 { ISD::UMAX, MVT::v4i64, 1 },
2384 { ISD::UMAX, MVT::v2i64, 1 },
2385 { ISD::UMIN, MVT::v8i64, 1 },
2386 { ISD::UMIN, MVT::v16i32, 1 },
2387 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split
2388 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split
2389 { ISD::UMIN, MVT::v4i64, 1 },
2390 { ISD::UMIN, MVT::v2i64, 1 },
2391 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
2392 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
2393 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
2394 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
2395 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
2396 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
2397 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
2398 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
2399 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2400 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2401 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2402 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2403 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2404 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2405 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2406 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2407 { ISD::FMAXNUM, MVT::f32, 2 },
2408 { ISD::FMAXNUM, MVT::v4f32, 2 },
2409 { ISD::FMAXNUM, MVT::v8f32, 2 },
2410 { ISD::FMAXNUM, MVT::v16f32, 2 },
2411 { ISD::FMAXNUM, MVT::f64, 2 },
2412 { ISD::FMAXNUM, MVT::v2f64, 2 },
2413 { ISD::FMAXNUM, MVT::v4f64, 2 },
2414 { ISD::FMAXNUM, MVT::v8f64, 2 },
2415 };
2416 static const CostTblEntry XOPCostTbl[] = {
2417 { ISD::BITREVERSE, MVT::v4i64, 4 },
2418 { ISD::BITREVERSE, MVT::v8i32, 4 },
2419 { ISD::BITREVERSE, MVT::v16i16, 4 },
2420 { ISD::BITREVERSE, MVT::v32i8, 4 },
2421 { ISD::BITREVERSE, MVT::v2i64, 1 },
2422 { ISD::BITREVERSE, MVT::v4i32, 1 },
2423 { ISD::BITREVERSE, MVT::v8i16, 1 },
2424 { ISD::BITREVERSE, MVT::v16i8, 1 },
2425 { ISD::BITREVERSE, MVT::i64, 3 },
2426 { ISD::BITREVERSE, MVT::i32, 3 },
2427 { ISD::BITREVERSE, MVT::i16, 3 },
2428 { ISD::BITREVERSE, MVT::i8, 3 }
2429 };
2430 static const CostTblEntry AVX2CostTbl[] = {
2431 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2432 { ISD::ABS, MVT::v8i32, 1 },
2433 { ISD::ABS, MVT::v16i16, 1 },
2434 { ISD::ABS, MVT::v32i8, 1 },
2435 { ISD::BITREVERSE, MVT::v4i64, 5 },
2436 { ISD::BITREVERSE, MVT::v8i32, 5 },
2437 { ISD::BITREVERSE, MVT::v16i16, 5 },
2438 { ISD::BITREVERSE, MVT::v32i8, 5 },
2439 { ISD::BSWAP, MVT::v4i64, 1 },
2440 { ISD::BSWAP, MVT::v8i32, 1 },
2441 { ISD::BSWAP, MVT::v16i16, 1 },
2442 { ISD::CTLZ, MVT::v4i64, 23 },
2443 { ISD::CTLZ, MVT::v8i32, 18 },
2444 { ISD::CTLZ, MVT::v16i16, 14 },
2445 { ISD::CTLZ, MVT::v32i8, 9 },
2446 { ISD::CTPOP, MVT::v4i64, 7 },
2447 { ISD::CTPOP, MVT::v8i32, 11 },
2448 { ISD::CTPOP, MVT::v16i16, 9 },
2449 { ISD::CTPOP, MVT::v32i8, 6 },
2450 { ISD::CTTZ, MVT::v4i64, 10 },
2451 { ISD::CTTZ, MVT::v8i32, 14 },
2452 { ISD::CTTZ, MVT::v16i16, 12 },
2453 { ISD::CTTZ, MVT::v32i8, 9 },
2454 { ISD::SADDSAT, MVT::v16i16, 1 },
2455 { ISD::SADDSAT, MVT::v32i8, 1 },
2456 { ISD::SMAX, MVT::v8i32, 1 },
2457 { ISD::SMAX, MVT::v16i16, 1 },
2458 { ISD::SMAX, MVT::v32i8, 1 },
2459 { ISD::SMIN, MVT::v8i32, 1 },
2460 { ISD::SMIN, MVT::v16i16, 1 },
2461 { ISD::SMIN, MVT::v32i8, 1 },
2462 { ISD::SSUBSAT, MVT::v16i16, 1 },
2463 { ISD::SSUBSAT, MVT::v32i8, 1 },
2464 { ISD::UADDSAT, MVT::v16i16, 1 },
2465 { ISD::UADDSAT, MVT::v32i8, 1 },
2466 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
2467 { ISD::UMAX, MVT::v8i32, 1 },
2468 { ISD::UMAX, MVT::v16i16, 1 },
2469 { ISD::UMAX, MVT::v32i8, 1 },
2470 { ISD::UMIN, MVT::v8i32, 1 },
2471 { ISD::UMIN, MVT::v16i16, 1 },
2472 { ISD::UMIN, MVT::v32i8, 1 },
2473 { ISD::USUBSAT, MVT::v16i16, 1 },
2474 { ISD::USUBSAT, MVT::v32i8, 1 },
2475 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
2476 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2477 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2478 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
2479 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
2480 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
2481 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
2482 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
2483 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
2484 };
2485 static const CostTblEntry AVX1CostTbl[] = {
2486 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2487 { ISD::ABS, MVT::v8i32, 3 },
2488 { ISD::ABS, MVT::v16i16, 3 },
2489 { ISD::ABS, MVT::v32i8, 3 },
2490 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
2491 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
2492 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2493 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
2494 { ISD::BSWAP, MVT::v4i64, 4 },
2495 { ISD::BSWAP, MVT::v8i32, 4 },
2496 { ISD::BSWAP, MVT::v16i16, 4 },
2497 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
2498 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
2499 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2500 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2501 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
2502 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
2503 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2504 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
2505 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
2506 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
2507 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2508 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2509 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2510 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2511 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2512 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2513 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2514 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2515 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2516 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2517 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2518 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2519 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2520 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2521 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
2522 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2523 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2524 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2525 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2526 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2527 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2528 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2529 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2530 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
2531 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2532 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2533 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2534 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2535 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2536 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2537 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
2538 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
2539 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
2540 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
2541 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
2542 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
2543 };
2544 static const CostTblEntry GLMCostTbl[] = {
2545 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
2546 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2547 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
2548 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2549 };
2550 static const CostTblEntry SLMCostTbl[] = {
2551 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
2552 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2553 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
2554 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2555 };
2556 static const CostTblEntry SSE42CostTbl[] = {
2557 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
2558 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
2559 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
2560 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
2561 };
2562 static const CostTblEntry SSE41CostTbl[] = {
2563 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2564 { ISD::SMAX, MVT::v4i32, 1 },
2565 { ISD::SMAX, MVT::v16i8, 1 },
2566 { ISD::SMIN, MVT::v4i32, 1 },
2567 { ISD::SMIN, MVT::v16i8, 1 },
2568 { ISD::UMAX, MVT::v4i32, 1 },
2569 { ISD::UMAX, MVT::v8i16, 1 },
2570 { ISD::UMIN, MVT::v4i32, 1 },
2571 { ISD::UMIN, MVT::v8i16, 1 },
2572 };
2573 static const CostTblEntry SSSE3CostTbl[] = {
2574 { ISD::ABS, MVT::v4i32, 1 },
2575 { ISD::ABS, MVT::v8i16, 1 },
2576 { ISD::ABS, MVT::v16i8, 1 },
2577 { ISD::BITREVERSE, MVT::v2i64, 5 },
2578 { ISD::BITREVERSE, MVT::v4i32, 5 },
2579 { ISD::BITREVERSE, MVT::v8i16, 5 },
2580 { ISD::BITREVERSE, MVT::v16i8, 5 },
2581 { ISD::BSWAP, MVT::v2i64, 1 },
2582 { ISD::BSWAP, MVT::v4i32, 1 },
2583 { ISD::BSWAP, MVT::v8i16, 1 },
2584 { ISD::CTLZ, MVT::v2i64, 23 },
2585 { ISD::CTLZ, MVT::v4i32, 18 },
2586 { ISD::CTLZ, MVT::v8i16, 14 },
2587 { ISD::CTLZ, MVT::v16i8, 9 },
2588 { ISD::CTPOP, MVT::v2i64, 7 },
2589 { ISD::CTPOP, MVT::v4i32, 11 },
2590 { ISD::CTPOP, MVT::v8i16, 9 },
2591 { ISD::CTPOP, MVT::v16i8, 6 },
2592 { ISD::CTTZ, MVT::v2i64, 10 },
2593 { ISD::CTTZ, MVT::v4i32, 14 },
2594 { ISD::CTTZ, MVT::v8i16, 12 },
2595 { ISD::CTTZ, MVT::v16i8, 9 }
2596 };
2597 static const CostTblEntry SSE2CostTbl[] = {
2598 { ISD::ABS, MVT::v2i64, 4 },
2599 { ISD::ABS, MVT::v4i32, 3 },
2600 { ISD::ABS, MVT::v8i16, 2 },
2601 { ISD::ABS, MVT::v16i8, 2 },
2602 { ISD::BITREVERSE, MVT::v2i64, 29 },
2603 { ISD::BITREVERSE, MVT::v4i32, 27 },
2604 { ISD::BITREVERSE, MVT::v8i16, 27 },
2605 { ISD::BITREVERSE, MVT::v16i8, 20 },
2606 { ISD::BSWAP, MVT::v2i64, 7 },
2607 { ISD::BSWAP, MVT::v4i32, 7 },
2608 { ISD::BSWAP, MVT::v8i16, 7 },
2609 { ISD::CTLZ, MVT::v2i64, 25 },
2610 { ISD::CTLZ, MVT::v4i32, 26 },
2611 { ISD::CTLZ, MVT::v8i16, 20 },
2612 { ISD::CTLZ, MVT::v16i8, 17 },
2613 { ISD::CTPOP, MVT::v2i64, 12 },
2614 { ISD::CTPOP, MVT::v4i32, 15 },
2615 { ISD::CTPOP, MVT::v8i16, 13 },
2616 { ISD::CTPOP, MVT::v16i8, 10 },
2617 { ISD::CTTZ, MVT::v2i64, 14 },
2618 { ISD::CTTZ, MVT::v4i32, 18 },
2619 { ISD::CTTZ, MVT::v8i16, 16 },
2620 { ISD::CTTZ, MVT::v16i8, 13 },
2621 { ISD::SADDSAT, MVT::v8i16, 1 },
2622 { ISD::SADDSAT, MVT::v16i8, 1 },
2623 { ISD::SMAX, MVT::v8i16, 1 },
2624 { ISD::SMIN, MVT::v8i16, 1 },
2625 { ISD::SSUBSAT, MVT::v8i16, 1 },
2626 { ISD::SSUBSAT, MVT::v16i8, 1 },
2627 { ISD::UADDSAT, MVT::v8i16, 1 },
2628 { ISD::UADDSAT, MVT::v16i8, 1 },
2629 { ISD::UMAX, MVT::v8i16, 2 },
2630 { ISD::UMAX, MVT::v16i8, 1 },
2631 { ISD::UMIN, MVT::v8i16, 2 },
2632 { ISD::UMIN, MVT::v16i8, 1 },
2633 { ISD::USUBSAT, MVT::v8i16, 1 },
2634 { ISD::USUBSAT, MVT::v16i8, 1 },
2635 { ISD::FMAXNUM, MVT::f64, 4 },
2636 { ISD::FMAXNUM, MVT::v2f64, 4 },
2637 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
2638 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
2639 };
2640 static const CostTblEntry SSE1CostTbl[] = {
2641 { ISD::FMAXNUM, MVT::f32, 4 },
2642 { ISD::FMAXNUM, MVT::v4f32, 4 },
2643 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
2644 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
2645 };
2646 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
2647 { ISD::CTTZ, MVT::i64, 1 },
2648 };
2649 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
2650 { ISD::CTTZ, MVT::i32, 1 },
2651 { ISD::CTTZ, MVT::i16, 1 },
2652 { ISD::CTTZ, MVT::i8, 1 },
2653 };
2654 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
2655 { ISD::CTLZ, MVT::i64, 1 },
2656 };
2657 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
2658 { ISD::CTLZ, MVT::i32, 1 },
2659 { ISD::CTLZ, MVT::i16, 1 },
2660 { ISD::CTLZ, MVT::i8, 1 },
2661 };
2662 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
2663 { ISD::CTPOP, MVT::i64, 1 },
2664 };
2665 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
2666 { ISD::CTPOP, MVT::i32, 1 },
2667 { ISD::CTPOP, MVT::i16, 1 },
2668 { ISD::CTPOP, MVT::i8, 1 },
2669 };
2670 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2671 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV
2672 { ISD::BITREVERSE, MVT::i64, 14 },
2673 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
2674 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
2675 { ISD::CTPOP, MVT::i64, 10 },
2676 { ISD::SADDO, MVT::i64, 1 },
2677 { ISD::UADDO, MVT::i64, 1 },
2678 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto
2679 };
2680 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2681 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV
2682 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV
2683 { ISD::BITREVERSE, MVT::i32, 14 },
2684 { ISD::BITREVERSE, MVT::i16, 14 },
2685 { ISD::BITREVERSE, MVT::i8, 11 },
2686 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
2687 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
2688 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV
2689 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
2690 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
2691 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH
2692 { ISD::CTPOP, MVT::i32, 8 },
2693 { ISD::CTPOP, MVT::i16, 9 },
2694 { ISD::CTPOP, MVT::i8, 7 },
2695 { ISD::SADDO, MVT::i32, 1 },
2696 { ISD::SADDO, MVT::i16, 1 },
2697 { ISD::SADDO, MVT::i8, 1 },
2698 { ISD::UADDO, MVT::i32, 1 },
2699 { ISD::UADDO, MVT::i16, 1 },
2700 { ISD::UADDO, MVT::i8, 1 },
2701 { ISD::UMULO, MVT::i32, 2 }, // mul + seto
2702 { ISD::UMULO, MVT::i16, 2 },
2703 { ISD::UMULO, MVT::i8, 2 },
2704 };
2705
2706 Type *RetTy = ICA.getReturnType();
2707 Type *OpTy = RetTy;
2708 Intrinsic::ID IID = ICA.getID();
2709 unsigned ISD = ISD::DELETED_NODE;
2710 switch (IID) {
2711 default:
2712 break;
2713 case Intrinsic::abs:
2714 ISD = ISD::ABS;
2715 break;
2716 case Intrinsic::bitreverse:
2717 ISD = ISD::BITREVERSE;
2718 break;
2719 case Intrinsic::bswap:
2720 ISD = ISD::BSWAP;
2721 break;
2722 case Intrinsic::ctlz:
2723 ISD = ISD::CTLZ;
2724 break;
2725 case Intrinsic::ctpop:
2726 ISD = ISD::CTPOP;
2727 break;
2728 case Intrinsic::cttz:
2729 ISD = ISD::CTTZ;
2730 break;
2731 case Intrinsic::maxnum:
2732 case Intrinsic::minnum:
2733 // FMINNUM has same costs so don't duplicate.
2734 ISD = ISD::FMAXNUM;
2735 break;
2736 case Intrinsic::sadd_sat:
2737 ISD = ISD::SADDSAT;
2738 break;
2739 case Intrinsic::smax:
2740 ISD = ISD::SMAX;
2741 break;
2742 case Intrinsic::smin:
2743 ISD = ISD::SMIN;
2744 break;
2745 case Intrinsic::ssub_sat:
2746 ISD = ISD::SSUBSAT;
2747 break;
2748 case Intrinsic::uadd_sat:
2749 ISD = ISD::UADDSAT;
2750 break;
2751 case Intrinsic::umax:
2752 ISD = ISD::UMAX;
2753 break;
2754 case Intrinsic::umin:
2755 ISD = ISD::UMIN;
2756 break;
2757 case Intrinsic::usub_sat:
2758 ISD = ISD::USUBSAT;
2759 break;
2760 case Intrinsic::sqrt:
2761 ISD = ISD::FSQRT;
2762 break;
2763 case Intrinsic::sadd_with_overflow:
2764 case Intrinsic::ssub_with_overflow:
2765 // SSUBO has same costs so don't duplicate.
2766 ISD = ISD::SADDO;
2767 OpTy = RetTy->getContainedType(0);
2768 break;
2769 case Intrinsic::uadd_with_overflow:
2770 case Intrinsic::usub_with_overflow:
2771 // USUBO has same costs so don't duplicate.
2772 ISD = ISD::UADDO;
2773 OpTy = RetTy->getContainedType(0);
2774 break;
2775 case Intrinsic::umul_with_overflow:
2776 case Intrinsic::smul_with_overflow:
2777 // SMULO has same costs so don't duplicate.
2778 ISD = ISD::UMULO;
2779 OpTy = RetTy->getContainedType(0);
2780 break;
2781 }
2782
2783 if (ISD != ISD::DELETED_NODE) {
2784 // Legalize the type.
2785 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2786 MVT MTy = LT.second;
2787
2788 // Attempt to lookup cost.
2789 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
2790 MTy.isVector()) {
2791 // With PSHUFB the code is very similar for all types. If we have integer
2792 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
2793 // we also need a PSHUFB.
2794 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
2795
2796 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
2797 // instructions. We also need an extract and an insert.
2798 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
2799 (ST->hasBWI() && MTy.is512BitVector())))
2800 Cost = Cost * 2 + 2;
2801
2802 return LT.first * Cost;
2803 }
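// Illustrative sketch, not part of X86TargetTransformInfo.cpp: the GFNI
// BITREVERSE cost formula from the block above, with the subtarget width
// checks folded into a single boolean. The function name and parameters are
// assumptions made for illustration only, not LLVM APIs.
static unsigned sketchGfniBitreverseCost(bool EltIsI8, bool NativeWidth,
                                         unsigned LTFirst) {
  // vXi8 only needs a GF2P8AFFINEQB; other element types also need a PSHUFB.
  unsigned Cost = EltIsI8 ? 1 : 2;
  // Without a natively supported width we need twice as many instructions,
  // plus an extract and an insert.
  if (!NativeWidth)
    Cost = Cost * 2 + 2;
  // Scale by the number of legalized vectors (LT.first above).
  return LTFirst * Cost;
}
// e.g. sketchGfniBitreverseCost(true, true, 1) == 1, while a non-native,
// non-i8 case gives (2 * 2 + 2) = 6 per legalized vector.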
2804
2805 auto adjustTableCost = [](const CostTblEntry &Entry, int LegalizationCost,
2806 FastMathFlags FMF) {
2807 // If there are no NaNs to deal with, then these are reduced to a
2808 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
2809 // assume is used in the non-fast case.
2810 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
2811 if (FMF.noNaNs())
2812 return LegalizationCost * 1;
2813 }
2814 return LegalizationCost * (int)Entry.Cost;
2815 };
2816
2817 if (ST->useGLMDivSqrtCosts())
2818 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2819 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2820
2821 if (ST->isSLM())
2822 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2823 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2824
2825 if (ST->hasCDI())
2826 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2827 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2828
2829 if (ST->hasBWI())
2830 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2831 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2832
2833 if (ST->hasAVX512())
2834 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2835 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2836
2837 if (ST->hasXOP())
2838 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2839 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2840
2841 if (ST->hasAVX2())
2842 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2843 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2844
2845 if (ST->hasAVX())
2846 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2847 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2848
2849 if (ST->hasSSE42())
2850 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2851 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2852
2853 if (ST->hasSSE41())
2854 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2855 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2856
2857 if (ST->hasSSSE3())
2858 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2859 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2860
2861 if (ST->hasSSE2())
2862 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2863 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2864
2865 if (ST->hasSSE1())
2866 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2867 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2868
2869 if (ST->hasBMI()) {
2870 if (ST->is64Bit())
2871 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
2872 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2873
2874 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
2875 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2876 }
2877
2878 if (ST->hasLZCNT()) {
2879 if (ST->is64Bit())
2880 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
2881 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2882
2883 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
2884 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2885 }
2886
2887 if (ST->hasPOPCNT()) {
2888 if (ST->is64Bit())
2889 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
2890 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2891
2892 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
2893 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2894 }
2895
2896 // TODO - add BMI (TZCNT) scalar handling
2897
2898 if (ST->is64Bit())
2899 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2900 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2901
2902 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2903 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2904 }
2905
2906 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2907}
2908
2909int X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2910 TTI::TargetCostKind CostKind) {
2911 if (ICA.isTypeBasedOnly())
2912 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2913
2914 static const CostTblEntry AVX512CostTbl[] = {
2915 { ISD::ROTL, MVT::v8i64, 1 },
2916 { ISD::ROTL, MVT::v4i64, 1 },
2917 { ISD::ROTL, MVT::v2i64, 1 },
2918 { ISD::ROTL, MVT::v16i32, 1 },
2919 { ISD::ROTL, MVT::v8i32, 1 },
2920 { ISD::ROTL, MVT::v4i32, 1 },
2921 { ISD::ROTR, MVT::v8i64, 1 },
2922 { ISD::ROTR, MVT::v4i64, 1 },
2923 { ISD::ROTR, MVT::v2i64, 1 },
2924 { ISD::ROTR, MVT::v16i32, 1 },
2925 { ISD::ROTR, MVT::v8i32, 1 },
2926 { ISD::ROTR, MVT::v4i32, 1 }
2927 };
2928 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
2929 static const CostTblEntry XOPCostTbl[] = {
2930 { ISD::ROTL, MVT::v4i64, 4 },
2931 { ISD::ROTL, MVT::v8i32, 4 },
2932 { ISD::ROTL, MVT::v16i16, 4 },
2933 { ISD::ROTL, MVT::v32i8, 4 },
2934 { ISD::ROTL, MVT::v2i64, 1 },
2935 { ISD::ROTL, MVT::v4i32, 1 },
2936 { ISD::ROTL, MVT::v8i16, 1 },
2937 { ISD::ROTL, MVT::v16i8, 1 },
2938 { ISD::ROTR, MVT::v4i64, 6 },
2939 { ISD::ROTR, MVT::v8i32, 6 },
2940 { ISD::ROTR, MVT::v16i16, 6 },
2941 { ISD::ROTR, MVT::v32i8, 6 },
2942 { ISD::ROTR, MVT::v2i64, 2 },
2943 { ISD::ROTR, MVT::v4i32, 2 },
2944 { ISD::ROTR, MVT::v8i16, 2 },
2945 { ISD::ROTR, MVT::v16i8, 2 }
2946 };
2947 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2948 { ISD::ROTL, MVT::i64, 1 },
2949 { ISD::ROTR, MVT::i64, 1 },
2950 { ISD::FSHL, MVT::i64, 4 }
2951 };
2952 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2953 { ISD::ROTL, MVT::i32, 1 },
2954 { ISD::ROTL, MVT::i16, 1 },
2955 { ISD::ROTL, MVT::i8, 1 },
2956 { ISD::ROTR, MVT::i32, 1 },
2957 { ISD::ROTR, MVT::i16, 1 },
2958 { ISD::ROTR, MVT::i8, 1 },
2959 { ISD::FSHL, MVT::i32, 4 },
2960 { ISD::FSHL, MVT::i16, 4 },
2961 { ISD::FSHL, MVT::i8, 4 }
2962 };
2963
2964 Intrinsic::ID IID = ICA.getID();
2965 Type *RetTy = ICA.getReturnType();
2966 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
2967 unsigned ISD = ISD::DELETED_NODE;
2968 switch (IID) {
2969 default:
2970 break;
2971 case Intrinsic::fshl:
2972 ISD = ISD::FSHL;
2973 if (Args[0] == Args[1])
2974 ISD = ISD::ROTL;
2975 break;
2976 case Intrinsic::fshr:
2977 // FSHR has same costs so don't duplicate.
2978 ISD = ISD::FSHL;
2979 if (Args[0] == Args[1])
2980 ISD = ISD::ROTR;
2981 break;
2982 }
2983
2984 if (ISD != ISD::DELETED_NODE) {
2985 // Legalize the type.
2986 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
2987 MVT MTy = LT.second;
2988
2989 // Attempt to lookup cost.
2990 if (ST->hasAVX512())
2991 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2992 return LT.first * Entry->Cost;
2993
2994 if (ST->hasXOP())
2995 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2996 return LT.first * Entry->Cost;
2997
2998 if (ST->is64Bit())
2999 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3000 return LT.first * Entry->Cost;
3001
3002 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3003 return LT.first * Entry->Cost;
3004 }
3005
3006 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3007}
3008
3009int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
3010 static const CostTblEntry SLMCostTbl[] = {
3011 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
3012 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
3013 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
3014 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
3015 };
3016
3017 assert(Val->isVectorTy() && "This must be a vector type");
21. '?' condition is true
3018 Type *ScalarType = Val->getScalarType();
3019 int RegisterFileMoveCost = 0;
3020
3021 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3022                      Opcode == Instruction::InsertElement)) {
21.1. 'Opcode' is equal to ExtractElement
3023 // Legalize the type.
3024 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3025
3026 // This type is legalized to a scalar type.
3027 if (!LT.second.isVector())
22. Calling 'MVT::isVector'
26. Returning from 'MVT::isVector'
27. Taking false branch
3028 return 0;
3029
3030 // The type may be split. Normalize the index to the new type.
3031 unsigned NumElts = LT.second.getVectorNumElements();
3032 unsigned SubNumElts = NumElts;
3033 Index = Index % NumElts;
3034
3035 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3036 // For inserts, we also need to insert the subvector back.
3037 if (LT.second.getSizeInBits() > 128) {
28. Assuming the condition is true
29. Taking true branch
3038 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
30. Assuming the condition is true
31. '?' condition is true
3039 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3040 SubNumElts = NumElts / NumSubVecs;
32. Value assigned to 'SubNumElts'
3041 if (SubNumElts <= Index) {
33. Assuming 'SubNumElts' is <= 'Index'
34. Taking true branch
3042 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
34.1. 'Opcode' is not equal to InsertElement
35. '?' condition is false
3043 Index %= SubNumElts;
36. Division by zero
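// Illustrative sketch, not part of X86TargetTransformInfo.cpp: a minimal,
// self-contained reproduction of the arithmetic the analyzer assumes on the
// path above. The concrete legalized type that would make SubNumElts zero is
// not named in the report, so the inputs below are hypothetical.
#include <cstdio>

static void sketchDivisionByZeroPath(unsigned NumElts, unsigned SizeInBits,
                                     unsigned Index) {
  unsigned NumSubVecs = SizeInBits / 128;     // mirrors line 3039
  unsigned SubNumElts = NumElts / NumSubVecs; // mirrors line 3040
  if (SubNumElts <= Index) {                  // mirrors line 3041
    if (SubNumElts == 0) {
      // Line 3043 has no such guard: it evaluates Index %= SubNumElts even
      // when SubNumElts is 0, which is the modulo by zero being reported.
      std::printf("Index %%= SubNumElts would be a modulo by zero\n");
      return;
    }
    Index %= SubNumElts;
  }
  std::printf("normalized Index = %u\n", Index);
}

// Hypothetical inputs with more 128-bit subvectors than vector elements:
//   sketchDivisionByZeroPath(/*NumElts=*/2, /*SizeInBits=*/512, /*Index=*/3);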
3044 }
3045 }
3046
3047 if (Index == 0) {
3048 // Floating point scalars are already located in index #0.
3049 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3050 // this holds for all of them.
3051 if (ScalarType->isFloatingPointTy())
3052 return RegisterFileMoveCost;
3053
3054 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3055 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3056 return 1 + RegisterFileMoveCost;
3057 }
3058
3059 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3060 assert(ISD && "Unexpected vector opcode");
3061 MVT MScalarTy = LT.second.getScalarType();
3062 if (ST->isSLM())
3063 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3064 return Entry->Cost + RegisterFileMoveCost;
3065
3066 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3067 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3068 (MScalarTy.isInteger() && ST->hasSSE41()))
3069 return 1 + RegisterFileMoveCost;
3070
3071 // Assume insertps is relatively cheap on all targets.
3072 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3073 Opcode == Instruction::InsertElement)
3074 return 1 + RegisterFileMoveCost;
3075
3076 // For extractions we just need to shuffle the element to index 0, which
3077 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3078 // the element to its destination. In both cases we must handle the
3079 // subvector move(s).
3080 // If the vector type is already less than 128-bits then don't reduce it.
3081 // TODO: Under what circumstances should we shuffle using the full width?
3082 int ShuffleCost = 1;
3083 if (Opcode == Instruction::InsertElement) {
3084 auto *SubTy = cast<VectorType>(Val);
3085 EVT VT = TLI->getValueType(DL, Val);
3086 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3087 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3088 ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, 0, SubTy);
3089 }
3090 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3091 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3092 }
3093
3094 // Add to the base cost if we know that the extracted element of a vector is
3095 // destined to be moved to and used in the integer register file.
3096 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3097 RegisterFileMoveCost += 1;
3098
3099 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3100}
3101
3102unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3103 const APInt &DemandedElts,
3104 bool Insert, bool Extract) {
3105 unsigned Cost = 0;
3106
3107 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much
3108 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3109 if (Insert) {
3110 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3111 MVT MScalarTy = LT.second.getScalarType();
3112
3113 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3114 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3115 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3116 // For types we can insert directly, insertion into 128-bit sub vectors is
3117 // cheap, followed by a cheap chain of concatenations.
3118 if (LT.second.getSizeInBits() <= 128) {
3119 Cost +=
3120 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3121 } else {
3122 // In each 128-bit lane, if at least one index is demanded but not all
3123 // indices are demanded and this 128-lane is not the first 128-lane of
3124 // the legalized vector, then this 128-lane needs an extracti128; if in
3125 // each 128-lane there is at least one demanded index, this 128-lane
3126 // needs an inserti128.
3127
3128 // The following cases will help you build a better understanding:
3129 // Assume we insert several elements into a v8i32 vector with AVX2.
3130 // Case#1: inserting into the 1st index needs vpinsrd + inserti128.
3131 // Case#2: inserting into the 5th index needs extracti128 + vpinsrd +
3132 // inserti128.
3133 // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
3134 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * LT.first;
3135 unsigned NumElts = LT.second.getVectorNumElements() * LT.first;
3136 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3137 unsigned Scale = NumElts / Num128Lanes;
3138 // We iterate each 128-lane, and check if we need a
3139 // extracti128/inserti128 for this 128-lane.
3140 for (unsigned I = 0; I < NumElts; I += Scale) {
3141 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3142 APInt MaskedDE = Mask & WidenedDemandedElts;
3143 unsigned Population = MaskedDE.countPopulation();
3144 Cost += (Population > 0 && Population != Scale &&
3145 I % LT.second.getVectorNumElements() != 0);
3146 Cost += Population > 0;
3147 }
3148 Cost += DemandedElts.countPopulation();
3149
3150 // For vXf32 cases, insertion into the 0'th index in each v4f32
3151 // 128-bit vector is free.
3152 // NOTE: This assumes legalization widens vXf32 vectors.
3153 if (MScalarTy == MVT::f32)
3154 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3155 i < e; i += 4)
3156 if (DemandedElts[i])
3157 Cost--;
3158 }
3159 } else if (LT.second.isVector()) {
3160 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3161 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3162 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3163 // considered cheap.
3164 if (Ty->isIntOrIntVectorTy())
3165 Cost += DemandedElts.countPopulation();
3166
3167 // Get the smaller of the legalized or original pow2-extended number of
3168 // vector elements, which represents the number of unpacks we'll end up
3169 // performing.
3170 unsigned NumElts = LT.second.getVectorNumElements();
3171 unsigned Pow2Elts =
3172 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3173 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3174 }
3175 }
3176
3177 // TODO: Use default extraction for now, but we should investigate extending this
3178 // to handle repeated subvector extraction.
3179 if (Extract)
3180 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3181
3182 return Cost;
3183}
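// Illustrative sketch, not part of X86TargetTransformInfo.cpp: the 128-bit-lane
// counting scheme from the Case#1-Case#3 comment in getScalarizationOverhead
// above, traced for a hypothetical AVX2 v8i32 insert (one legalized vector, so
// the "first 128-lane" check reduces to I != 0). The helper name is an
// assumption made for illustration, not an LLVM API.
#include <bitset>

static unsigned sketchLaneInsertCost(std::bitset<8> Demanded) {
  const unsigned NumElts = 8;                   // v8i32
  const unsigned Num128Lanes = 2;               // 256 bits / 128
  const unsigned Scale = NumElts / Num128Lanes; // 4 elements per 128-bit lane
  unsigned Cost = 0;
  for (unsigned I = 0; I < NumElts; I += Scale) {
    unsigned Population = 0;
    for (unsigned J = I; J < I + Scale; ++J)
      Population += Demanded[J];
    // A partially demanded lane that is not the first one needs an extracti128.
    Cost += (Population > 0 && Population != Scale && I != 0);
    // Any demanded lane needs an inserti128 to put it back.
    Cost += (Population > 0);
  }
  // One vpinsrd per demanded element.
  return Cost + static_cast<unsigned>(Demanded.count());
}

// Case#2 above: demanding only index 5 costs extracti128 + vpinsrd +
// inserti128, i.e. sketchLaneInsertCost(std::bitset<8>("00100000")) == 3.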
3184
3185int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3186 MaybeAlign Alignment, unsigned AddressSpace,
3187 TTI::TargetCostKind CostKind,
3188 const Instruction *I) {
3189 // TODO: Handle other cost kinds.
3190 if (CostKind != TTI::TCK_RecipThroughput) {
3191 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3192 // Store instruction with index and scale costs 2 Uops.
3193 // Check the preceding GEP to identify non-const indices.
3194 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3195 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3196 return TTI::TCC_Basic * 2;
3197 }
3198 }
3199 return TTI::TCC_Basic;
3200 }
3201
3202 // Handle non-power-of-two vectors such as <3 x float>
3203 if (auto *VTy = dyn_cast<FixedVectorType>(Src)) {
3204 unsigned NumElem = VTy->getNumElements();
3205
3206 // Handle a few common cases:
3207 // <3 x float>
3208 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
3209 // Cost = 64 bit store + extract + 32 bit store.
3210 return 3;
3211
3212 // <3 x double>
3213 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
3214 // Cost = 128 bit store + unpack + 64 bit store.
3215 return 3;
3216
3217 // Assume that all other non-power-of-two numbers are scalarized.
3218 if (!isPowerOf2_32(NumElem)) {
3219 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3220 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
3221 AddressSpace, CostKind);
3222 int SplitCost = getScalarizationOverhead(VTy, DemandedElts,
3223 Opcode == Instruction::Load,
3224 Opcode == Instruction::Store);
3225 return NumElem * Cost + SplitCost;
3226 }
3227 }
3228
3229 // Type legalization can't handle structs
3230 if (TLI->getValueType(DL, Src, true) == MVT::Other)
3231 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3232 CostKind);
3233
3234 // Legalize the type.
3235 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3236 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3237        "Invalid Opcode");
3238
3239 // Each load/store unit costs 1.
3240 int Cost = LT.first * 1;
3241
3242 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
3243 // proxy for a double-pumped AVX memory interface such as on Sandybridge.
3244 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
3245 Cost *= 2;
3246
3247 return Cost;
3248}
3249
3250int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
3251 Align Alignment, unsigned AddressSpace,
3252 TTI::TargetCostKind CostKind) {
3253 bool IsLoad = (Instruction::Load == Opcode);
3254 bool IsStore = (Instruction::Store == Opcode);
3255
3256 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3257 if (!SrcVTy)
3258 // For a scalar, take the regular cost without the mask
3259 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3260
3261 unsigned NumElem = SrcVTy->getNumElements();
3262 auto *MaskTy =
3263 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3264 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3265 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) ||
3266 !isPowerOf2_32(NumElem)) {
3267 // Scalarization
3268 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3269 int MaskSplitCost =
3270 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3271 int ScalarCompareCost = getCmpSelInstrCost(
3272 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3273 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3274 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3275 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3276 int ValueSplitCost =
3277 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3278 int MemopCost =
3279 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3280 Alignment, AddressSpace, CostKind);
3281 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3282 }
3283
3284 // Legalize the type.
3285 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3286 auto VT = TLI->getValueType(DL, SrcVTy);
3287 int Cost = 0;
3288 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3289 LT.second.getVectorNumElements() == NumElem)
3290 // Promotion requires expand/truncate for data and a shuffle for mask.
3291 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
3292 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);
3293
3294 else if (LT.second.getVectorNumElements() > NumElem) {
3295 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3296 LT.second.getVectorNumElements());
3297 // Expanding requires filling the mask with zeroes
3298 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
3299 }
3300
3301 // Pre-AVX512: each maskmov load costs 2 and each store costs ~8.
3302 if (!ST->hasAVX512())
3303 return Cost + LT.first * (IsLoad ? 2 : 8);
3304
3305 // AVX-512 masked load/store is cheaper
3306 return Cost + LT.first;
3307}
3308
3309int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
3310 const SCEV *Ptr) {
3311 // Address computations in vectorized code with non-consecutive addresses will
3312 // likely result in more instructions compared to scalar code where the
3313 // computation can more often be merged into the index mode. The resulting
3314 // extra micro-ops can significantly decrease throughput.
3315 const unsigned NumVectorInstToHideOverhead = 10;
3316
3317 // Cost modeling of Strided Access Computation is hidden by the indexing
3318 // modes of X86 regardless of the stride value. We don't believe that there
3319 // is a difference between constant strided access in general and a constant
3320 // stride value which is less than or equal to 64.
3321 // Even in the case of (loop invariant) stride whose value is not known at
3322 // compile time, the address computation will not incur more than one extra
3323 // ADD instruction.
3324 if (Ty->isVectorTy() && SE) {
3325 if (!BaseT::isStridedAccess(Ptr))
3326 return NumVectorInstToHideOverhead;
3327 if (!BaseT::getConstantStrideStep(SE, Ptr))
3328 return 1;
3329 }
3330
3331 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3332}
3333
3334int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3335 bool IsPairwise,
3336 TTI::TargetCostKind CostKind) {
3337 // Just use the default implementation for pair reductions.
3338 if (IsPairwise)
3339 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind);
3340
3341 // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
3342 // and use it as the cost.
3343
3344 static const CostTblEntry SLMCostTblNoPairWise[] = {
3345 { ISD::FADD, MVT::v2f64, 3 },
3346 { ISD::ADD, MVT::v2i64, 5 },
3347 };
3348
3349 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3350 { ISD::FADD, MVT::v2f64, 2 },
3351 { ISD::FADD, MVT::v4f32, 4 },
3352 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
3353 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
3354 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
3355 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
3356 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
3357 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
3358 { ISD::ADD, MVT::v2i8, 2 },
3359 { ISD::ADD, MVT::v4i8, 2 },
3360 { ISD::ADD, MVT::v8i8, 2 },
3361 { ISD::ADD, MVT::v16i8, 3 },
3362 };
3363
3364 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3365 { ISD::FADD, MVT::v4f64, 3 },
3366 { ISD::FADD, MVT::v4f32, 3 },
3367 { ISD::FADD, MVT::v8f32, 4 },
3368 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
3369 { ISD::ADD, MVT::v4i64, 3 },
3370 { ISD::ADD, MVT::v8i32, 5 },
3371 { ISD::ADD, MVT::v16i16, 5 },
3372 { ISD::ADD, MVT::v32i8, 4 },
3373 };
3374
3375 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3376 assert(ISD && "Invalid opcode");
3377
3378 // Before legalizing the type, give a chance to look up illegal narrow types
3379 // in the table.
3380 // FIXME: Is there a better way to do this?
3381 EVT VT = TLI->getValueType(DL, ValTy);
3382 if (VT.isSimple()) {
3383 MVT MTy = VT.getSimpleVT();
3384 if (ST->isSLM())
3385 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3386 return Entry->Cost;
3387
3388 if (ST->hasAVX())
3389 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3390 return Entry->Cost;
3391
3392 if (ST->hasSSE2())
3393 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3394 return Entry->Cost;
3395 }
3396
3397 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3398
3399 MVT MTy = LT.second;
3400
3401 auto *ValVTy = cast<FixedVectorType>(ValTy);
3402
3403 unsigned ArithmeticCost = 0;
3404 if (LT.first != 1 && MTy.isVector() &&
3405 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3406 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3407 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3408 MTy.getVectorNumElements());
3409 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3410 ArithmeticCost *= LT.first - 1;
3411 }
3412
3413 if (ST->isSLM())
3414 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3415 return ArithmeticCost + Entry->Cost;
3416
3417 if (ST->hasAVX())
3418 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3419 return ArithmeticCost + Entry->Cost;
3420
3421 if (ST->hasSSE2())
3422 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3423 return ArithmeticCost + Entry->Cost;
3424
3425 // FIXME: These assume a naive kshift+binop lowering, which is probably
3426 // conservative in most cases.
3427 static const CostTblEntry AVX512BoolReduction[] = {
3428 { ISD::AND, MVT::v2i1, 3 },
3429 { ISD::AND, MVT::v4i1, 5 },
3430 { ISD::AND, MVT::v8i1, 7 },
3431 { ISD::AND, MVT::v16i1, 9 },
3432 { ISD::AND, MVT::v32i1, 11 },
3433 { ISD::AND, MVT::v64i1, 13 },
3434 { ISD::OR, MVT::v2i1, 3 },
3435 { ISD::OR, MVT::v4i1, 5 },
3436 { ISD::OR, MVT::v8i1, 7 },
3437 { ISD::OR, MVT::v16i1, 9 },
3438 { ISD::OR, MVT::v32i1, 11 },
3439 { ISD::OR, MVT::v64i1, 13 },
3440 };
3441
3442 static const CostTblEntry AVX2BoolReduction[] = {
3443 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
3444 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
3445 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
3446 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
3447 };
3448
3449 static const CostTblEntry AVX1BoolReduction[] = {
3450 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
3451 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
3452 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3453 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3454 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
3455 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
3456 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3457 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3458 };
3459
3460 static const CostTblEntry SSE2BoolReduction[] = {
3461 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
3462 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
3463 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
3464 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
3465 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
3466 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
3467 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
3468 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
3469 };
3470
3471 // Handle bool allof/anyof patterns.
3472 if (ValVTy->getElementType()->isIntegerTy(1)) {
3473 unsigned ArithmeticCost = 0;
3474 if (LT.first != 1 && MTy.isVector() &&
3475 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3476 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3477 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3478 MTy.getVectorNumElements());
3479 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3480 ArithmeticCost *= LT.first - 1;
3481 }
3482
3483 if (ST->hasAVX512())
3484 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3485 return ArithmeticCost + Entry->Cost;
3486 if (ST->hasAVX2())
3487 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3488 return ArithmeticCost + Entry->Cost;
3489 if (ST->hasAVX())
3490 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3491 return ArithmeticCost + Entry->Cost;
3492 if (ST->hasSSE2())
3493 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3494 return ArithmeticCost + Entry->Cost;
3495
3496 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3497 CostKind);
3498 }
3499
3500 unsigned NumVecElts = ValVTy->getNumElements();
3501 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3502
3503 // Special case power of 2 reductions where the scalar type isn't changed
3504 // by type legalization.
3505 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3506 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3507 CostKind);
3508
3509 unsigned ReductionCost = 0;
3510
3511 auto *Ty = ValVTy;
3512 if (LT.first != 1 && MTy.isVector() &&
3513 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3514 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3515 Ty = FixedVectorType::get(ValVTy->getElementType(),
3516 MTy.getVectorNumElements());
3517 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
3518 ReductionCost *= LT.first - 1;
3519 NumVecElts = MTy.getVectorNumElements();
3520 }
3521
3522 // Now handle reduction with the legal type, taking into account size changes
3523 // at each level.
3524 while (NumVecElts > 1) {
3525 // Determine the size of the remaining vector we need to reduce.
3526 unsigned Size = NumVecElts * ScalarSize;
3527 NumVecElts /= 2;
3528 // If we're reducing from 256/512 bits, use an extract_subvector.
3529 if (Size > 128) {
3530 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3531 ReductionCost +=
3532 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3533 Ty = SubTy;
3534 } else if (Size == 128) {
3535 // Reducing from 128 bits is a permute of v2f64/v2i64.
3536 FixedVectorType *ShufTy;
3537 if (ValVTy->isFloatingPointTy())
3538 ShufTy =
3539 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
3540 else
3541 ShufTy =
3542 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
3543 ReductionCost +=
3544 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3545 } else if (Size == 64) {
3546 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3547 FixedVectorType *ShufTy;
3548 if (ValVTy->isFloatingPointTy())
3549 ShufTy =
3550 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
3551 else
3552 ShufTy =
3553 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
3554 ReductionCost +=
3555 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3556 } else {
3557 // Reducing from smaller size is a shift by immediate.
3558 auto *ShiftTy = FixedVectorType::get(
3559 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
3560 ReductionCost += getArithmeticInstrCost(
3561 Instruction::LShr, ShiftTy, CostKind,
3562 TargetTransformInfo::OK_AnyValue,
3563 TargetTransformInfo::OK_UniformConstantValue,
3564 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3565 }
3566
3567 // Add the arithmetic op for this level.
3568 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
3569 }
3570
3571 // Add the final extract element to the cost.
3572 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3573}
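// Illustrative sketch, not part of X86TargetTransformInfo.cpp: the per-level
// shuffle kinds chosen by the reduction loop in getArithmeticReductionCost
// above, traced for a hypothetical legal v8i32 (8 elements, 32-bit scalars).
// The helper name is an assumption made for illustration, not an LLVM API.
#include <cstdio>

static void sketchReductionLevels(unsigned NumVecElts, unsigned ScalarSize) {
  while (NumVecElts > 1) {
    unsigned Size = NumVecElts * ScalarSize; // width before this halving step
    NumVecElts /= 2;
    if (Size > 128)
      std::printf("%4u bits: extract_subvector + arithmetic op\n", Size);
    else if (Size == 128)
      std::printf("%4u bits: v2i64/v2f64 permute + arithmetic op\n", Size);
    else if (Size == 64)
      std::printf("%4u bits: v4i32/v4f32 shuffle + arithmetic op\n", Size);
    else
      std::printf("%4u bits: shift by immediate + arithmetic op\n", Size);
  }
  std::printf("final: extractelement of lane 0\n");
}

// sketchReductionLevels(8, 32) prints the 256-bit (extract_subvector), 128-bit
// (permute) and 64-bit (shuffle) levels, followed by the final extractelement.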
3574
3575int X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned) {
3576 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3577
3578 MVT MTy = LT.second;
3579
3580 int ISD;
3581 if (Ty->isIntOrIntVectorTy()) {
3582 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3583 } else {
3584 assert(Ty->isFPOrFPVectorTy() &&
3585        "Expected float point or integer vector type.");
3586 ISD = ISD::FMINNUM;
3587 }
3588
3589 static const CostTblEntry SSE1CostTbl[] = {
3590 {ISD::FMINNUM, MVT::v4f32, 1},
3591 };
3592
3593 static const CostTblEntry SSE2CostTbl[] = {
3594 {ISD::FMINNUM, MVT::v2f64, 1},
3595 {ISD::SMIN, MVT::v8i16, 1},
3596 {ISD::UMIN, MVT::v16i8, 1},
3597 };
3598
3599 static const CostTblEntry SSE41CostTbl[] = {
3600 {ISD::SMIN, MVT::v4i32, 1},
3601 {ISD::UMIN, MVT::v4i32, 1},
3602 {ISD::UMIN, MVT::v8i16, 1},
3603 {ISD::SMIN, MVT::v16i8, 1},
3604 };
3605
3606 static const CostTblEntry SSE42CostTbl[] = {
3607 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
3608 };
3609
3610 static const CostTblEntry AVX1CostTbl[] = {
3611 {ISD::FMINNUM, MVT::v8f32, 1},
3612 {ISD::FMINNUM, MVT::v4f64, 1},
3613 {ISD::SMIN, MVT::v8i32, 3},
3614 {ISD::UMIN, MVT::v8i32, 3},
3615 {ISD::SMIN, MVT::v16i16, 3},
3616 {ISD::UMIN, MVT::v16i16, 3},
3617 {ISD::SMIN, MVT::v32i8, 3},
3618 {ISD::UMIN, MVT::v32i8, 3},
3619 };
3620
3621 static const CostTblEntry AVX2CostTbl[] = {
3622 {ISD::SMIN, MVT::v8i32, 1},
3623 {ISD::UMIN, MVT::v8i32, 1},
3624 {ISD::SMIN, MVT::v16i16, 1},
3625 {ISD::UMIN, MVT::v16i16, 1},
3626 {ISD::SMIN, MVT::v32i8, 1},
3627 {ISD::UMIN, MVT::v32i8, 1},
3628 };
3629
3630 static const CostTblEntry AVX512CostTbl[] = {
3631 {ISD::FMINNUM, MVT::v16f32, 1},
3632 {ISD::FMINNUM, MVT::v8f64, 1},
3633 {ISD::SMIN, MVT::v2i64, 1},
3634 {ISD::UMIN, MVT::v2i64, 1},
3635 {ISD::SMIN, MVT::v4i64, 1},
3636 {ISD::UMIN, MVT::v4i64, 1},
3637 {ISD::SMIN, MVT::v8i64, 1},
3638 {ISD::UMIN, MVT::v8i64, 1},
3639 {ISD::SMIN, MVT::v16i32, 1},
3640 {ISD::UMIN, MVT::v16i32, 1},
3641 };
3642
3643 static const CostTblEntry AVX512BWCostTbl[] = {
3644 {ISD::SMIN, MVT::v32i16, 1},
3645 {ISD::UMIN, MVT::v32i16, 1},
3646 {ISD::SMIN, MVT::v64i8, 1},
3647 {ISD::UMIN, MVT::v64i8, 1},
3648 };
3649
3650 // If we have a native MIN/MAX instruction for this type, use it.
3651 if (ST->hasBWI())
3652 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3653 return LT.first * Entry->Cost;
3654
3655 if (ST->hasAVX512())
3656 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3657 return LT.first * Entry->Cost;
3658
3659 if (ST->hasAVX2())
3660 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3661 return LT.first * Entry->Cost;
3662
3663 if (ST->hasAVX())
3664 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3665 return LT.first * Entry->Cost;
3666
3667 if (ST->hasSSE42())
3668 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3669 return LT.first * Entry->Cost;
3670
3671 if (ST->hasSSE41())
3672 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3673 return LT.first * Entry->Cost;
3674
3675 if (ST->hasSSE2())
3676 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3677 return LT.first * Entry->Cost;
3678
3679 if (ST->hasSSE1())
3680 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3681 return LT.first * Entry->Cost;
3682
3683 unsigned CmpOpcode;
3684 if (Ty->isFPOrFPVectorTy()) {
3685 CmpOpcode = Instruction::FCmp;
3686 } else {
3687 assert(Ty->isIntOrIntVectorTy() &&
3688        "expecting floating point or integer type for min/max reduction");
3689 CmpOpcode = Instruction::ICmp;
3690 }
3691
3692 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3693 // Otherwise fall back to cmp+select.
3694 return getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
3695 CostKind) +
3696 getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
3697 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3698}
3699
3700int X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
3701 bool IsPairwise, bool IsUnsigned,
3702 TTI::TargetCostKind CostKind) {
3703 // Just use the default implementation for pair reductions.
3704 if (IsPairwise)
3705 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3706 CostKind);
3707
3708 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3709
3710 MVT MTy = LT.second;
3711
3712 int ISD;
3713 if (ValTy->isIntOrIntVectorTy()) {
3714 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3715 } else {
3716 assert(ValTy->isFPOrFPVectorTy() &&
3717        "Expected float point or integer vector type.");
3718 ISD = ISD::FMINNUM;
3719 }
3720
3721 // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
3722 // and use it as the cost.
3723
3724 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3725 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
3726 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
3727 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
3728 };
3729
3730 static const CostTblEntry SSE41CostTblNoPairWise[] = {
3731 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
3732 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
3733 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
3734 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
3735 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
3736 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
3737 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
3738 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
3739 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
3740 {ISD::SMIN, MVT::v16i8, 6},
3741 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
3742 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
3743 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
3744 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
3745 };
3746
3747 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3748 {ISD::SMIN, MVT::v16i16, 6},
3749 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
3750 {ISD::SMIN, MVT::v32i8, 8},
3751 {ISD::UMIN, MVT::v32i8, 8},
3752 };
3753
3754 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
3755 {ISD::SMIN, MVT::v32i16, 8},
3756 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
3757 {ISD::SMIN, MVT::v64i8, 10},
3758 {ISD::UMIN, MVT::v64i8, 10},
3759 };
3760
3761 // Before legalizing the type, give a chance to look up illegal narrow types
3762 // in the table.
3763 // FIXME: Is there a better way to do this?
3764 EVT VT = TLI->getValueType(DL, ValTy);
3765 if (VT.isSimple()) {
3766 MVT MTy = VT.getSimpleVT();
3767 if (ST->hasBWI())
3768 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3769 return Entry->Cost;
3770
3771 if (ST->hasAVX())
3772 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3773 return Entry->Cost;
3774
3775 if (ST->hasSSE41())
3776 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3777 return Entry->Cost;
3778
3779 if (ST->hasSSE2())
3780 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3781 return Entry->Cost;
3782 }
3783
3784 auto *ValVTy = cast<FixedVectorType>(ValTy);
3785 unsigned NumVecElts = ValVTy->getNumElements();
3786
3787 auto *Ty = ValVTy;
3788 unsigned MinMaxCost = 0;
3789 if (LT.first != 1 && MTy.isVector() &&
3790 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3791 // Type needs to be split. We need LT.first - 1 operations.
3792 Ty = FixedVectorType::get(ValVTy->getElementType(),
3793 MTy.getVectorNumElements());
3794 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
3795 MTy.getVectorNumElements());
3796 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3797 MinMaxCost *= LT.first - 1;
3798 NumVecElts = MTy.getVectorNumElements();
3799 }
3800
3801 if (ST->hasBWI())
3802 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3803 return MinMaxCost + Entry->Cost;
3804
3805 if (ST->hasAVX())
3806 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3807 return MinMaxCost + Entry->Cost;
3808
3809 if (ST->hasSSE41())
3810 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3811 return MinMaxCost + Entry->Cost;
3812
3813 if (ST->hasSSE2())
3814 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3815 return MinMaxCost + Entry->Cost;
3816
3817 unsigned ScalarSize = ValTy->getScalarSizeInBits();
3818
3819 // Special case power of 2 reductions where the scalar type isn't changed
3820 // by type legalization.
3821 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
3822 ScalarSize != MTy.getScalarSizeInBits())
3823 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3824 CostKind);
3825
3826 // Now handle reduction with the legal type, taking into account size changes
3827 // at each level.
3828 while (NumVecElts > 1) {
3829 // Determine the size of the remaining vector we need to reduce.
3830 unsigned Size = NumVecElts * ScalarSize;
3831 NumVecElts /= 2;
3832 // If we're reducing from 256/512 bits, use an extract_subvector.
3833 if (Size > 128) {
3834 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3835 MinMaxCost +=
3836 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3837 Ty = SubTy;
3838 } else if (Size == 128) {
3839 // Reducing from 128 bits is a permute of v2f64/v2i64.
3840 VectorType *ShufTy;
3841 if (ValTy->isFloatingPointTy())
3842 ShufTy =
3843 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
3844 else
3845 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
3846 MinMaxCost +=
3847 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3848 } else if (Size == 64) {
3849 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3850 FixedVectorType *ShufTy;
3851 if (ValTy->isFloatingPointTy())
3852 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
3853 else
3854 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
3855 MinMaxCost +=
3856 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3857 } else {
3858 // Reducing from smaller size is a shift by immediate.
3859 auto *ShiftTy = FixedVectorType::get(
3860 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
3861 MinMaxCost += getArithmeticInstrCost(
3862 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
3863 TargetTransformInfo::OK_AnyValue,
3864 TargetTransformInfo::OK_UniformConstantValue,
3865 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3866 }
3867
3868 // Add the arithmetic op for this level.
3869 auto *SubCondTy =
3870 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
3871 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3872 }
3873
3874 // Add the final extract element to the cost.
3875 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3876}
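
As a rough illustration of the halving loop above, here is a small standalone C++ sketch (not part of the analyzed source; it only mirrors the control flow, using a hypothetical v16i32 reduction and no real cost values) showing which shuffle kind the model would pick at each level:

#include <cstdio>

int main() {
  unsigned NumVecElts = 16, ScalarSize = 32; // hypothetical v16i32 reduction
  while (NumVecElts > 1) {
    unsigned Size = NumVecElts * ScalarSize; // width still to be reduced
    NumVecElts /= 2;
    if (Size > 128)
      std::printf("%u bits: extract_subvector down to %u elements\n", Size,
                  NumVecElts);
    else if (Size == 128)
      std::printf("%u bits: permute of v2i64/v2f64\n", Size);
    else if (Size == 64)
      std::printf("%u bits: shuffle of v4i32/v4f32\n", Size);
    else
      std::printf("%u bits: logical shift right by immediate\n", Size);
    // Each level is then followed by one more min/max on the narrower vector.
  }
  return 0;
}
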
3877
3878/// Calculate the cost of materializing a 64-bit value. This helper
3879/// method might only calculate a fraction of a larger immediate. Therefore it
3880/// is valid to return a cost of ZERO.
3881int X86TTIImpl::getIntImmCost(int64_t Val) {
3882 if (Val == 0)
3883 return TTI::TCC_Free;
3884
3885 if (isInt<32>(Val))
3886 return TTI::TCC_Basic;
3887
3888 return 2 * TTI::TCC_Basic;
3889}
3890
3891int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
3892 TTI::TargetCostKind CostKind) {
3893  assert(Ty->isIntegerTy());
3894
3895 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3896 if (BitSize == 0)
3897 return ~0U;
3898
3899  // Never hoist constants larger than 128 bits, because this might lead to
3900  // incorrect code generation or assertions in codegen.
3901  // FIXME: Create a cost model for types larger than i128 once the codegen
3902  // issues have been fixed.
3903 if (BitSize > 128)
3904 return TTI::TCC_Free;
3905
3906 if (Imm == 0)
3907 return TTI::TCC_Free;
3908
3909 // Sign-extend all constants to a multiple of 64-bit.
3910 APInt ImmVal = Imm;
3911 if (BitSize % 64 != 0)
3912 ImmVal = Imm.sext(alignTo(BitSize, 64));
3913
3914 // Split the constant into 64-bit chunks and calculate the cost for each
3915 // chunk.
3916 int Cost = 0;
3917 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
3918 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
3919 int64_t Val = Tmp.getSExtValue();
3920 Cost += getIntImmCost(Val);
3921 }
3922 // We need at least one instruction to materialize the constant.
3923 return std::max(1, Cost);
3924}
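
A minimal standalone sketch of the chunking scheme above (illustrative only; the helper below approximates the 64-bit materialization cost with plain integers, and the 128-bit constant is an assumed example):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Approximates the per-chunk cost: free for zero, basic for 32-bit values,
// two basic operations otherwise.
static int chunkCost(int64_t Val) {
  if (Val == 0)
    return 0;                                    // TCC_Free
  if (Val >= INT32_MIN && Val <= INT32_MAX)
    return 1;                                    // TCC_Basic
  return 2;                                      // 2 * TCC_Basic
}

int main() {
  // Hypothetical i128 constant split into its two 64-bit halves.
  int64_t Hi = 0x100000000LL, Lo = 0x2A;
  int Cost = chunkCost(Hi) + chunkCost(Lo);
  std::printf("cost = %d\n", std::max(1, Cost)); // prints 3
  return 0;
}
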
3925
3926int X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
3927 const APInt &Imm, Type *Ty,
3928 TTI::TargetCostKind CostKind,
3929 Instruction *Inst) {
3930  assert(Ty->isIntegerTy());
3931
3932 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3933 // There is no cost model for constants with a bit size of 0. Return TCC_Free
3934 // here, so that constant hoisting will ignore this constant.
3935 if (BitSize == 0)
3936 return TTI::TCC_Free;
3937
3938 unsigned ImmIdx = ~0U;
3939 switch (Opcode) {
3940 default:
3941 return TTI::TCC_Free;
3942 case Instruction::GetElementPtr:
3943 // Always hoist the base address of a GetElementPtr. This prevents the
3944 // creation of new constants for every base constant that gets constant
3945 // folded with the offset.
3946 if (Idx == 0)
3947 return 2 * TTI::TCC_Basic;
3948 return TTI::TCC_Free;
3949 case Instruction::Store:
3950 ImmIdx = 0;
3951 break;
3952 case Instruction::ICmp:
3953  // This is an imperfect hack to prevent constant hoisting of
3954  // compares that might be trying to check if a 64-bit value fits in
3955  // 32 bits. The backend can optimize these cases using a right shift by 32.
3956  // Ideally we would check the compare predicate here. There are also other
3957  // similar immediates the backend can use shifts for.
3958 if (Idx == 1 && Imm.getBitWidth() == 64) {
3959 uint64_t ImmVal = Imm.getZExtValue();
3960 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
3961 return TTI::TCC_Free;
3962 }
3963 ImmIdx = 1;
3964 break;
3965 case Instruction::And:
3966  // We support 64-bit ANDs with immediates that have 32 bits of leading zeroes
3967  // by using a 32-bit operation with implicit zero extension. Detect such
3968  // immediates here, as the normal path expects bit 31 to be sign-extended.
3969 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
3970 return TTI::TCC_Free;
3971 ImmIdx = 1;
3972 break;
3973 case Instruction::Add:
3974 case Instruction::Sub:
3975 // For add/sub, we can use the opposite instruction for INT32_MIN.
3976 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
3977 return TTI::TCC_Free;
3978 ImmIdx = 1;
3979 break;
3980 case Instruction::UDiv:
3981 case Instruction::SDiv:
3982 case Instruction::URem:
3983 case Instruction::SRem:
3984 // Division by constant is typically expanded later into a different
3985 // instruction sequence. This completely changes the constants.
3986 // Report them as "free" to stop ConstantHoist from marking them as opaque.
3987 return TTI::TCC_Free;
3988 case Instruction::Mul:
3989 case Instruction::Or:
3990 case Instruction::Xor:
3991 ImmIdx = 1;
3992 break;
3993 // Always return TCC_Free for the shift value of a shift instruction.
3994 case Instruction::Shl:
3995 case Instruction::LShr:
3996 case Instruction::AShr:
3997 if (Idx == 1)
3998 return TTI::TCC_Free;
3999 break;
4000 case Instruction::Trunc:
4001 case Instruction::ZExt:
4002 case Instruction::SExt:
4003 case Instruction::IntToPtr:
4004 case Instruction::PtrToInt:
4005 case Instruction::BitCast:
4006 case Instruction::PHI:
4007 case Instruction::Call:
4008 case Instruction::Select:
4009 case Instruction::Ret:
4010 case Instruction::Load:
4011 break;
4012 }
4013
4014 if (Idx == ImmIdx) {
4015 int NumConstants = divideCeil(BitSize, 64);
4016 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4017 return (Cost <= NumConstants * TTI::TCC_Basic)
4018 ? static_cast<int>(TTI::TCC_Free)
4019 : Cost;
4020 }
4021
4022 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4023}
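
The final decision above can be pictured with a small standalone sketch (assumed TCC values and an assumed 64-bit immediate; not LLVM API): an immediate whose cost does not exceed the expected number of 64-bit chunks at basic cost is reported as free, so constant hoisting leaves it in place.

#include <cstdio>

int main() {
  unsigned BitSize = 64;
  int NumConstants = (BitSize + 63) / 64;  // divideCeil(BitSize, 64) == 1
  int TCC_Basic = 1, TCC_Free = 0;
  int ImmCost = 2;                         // assumed full 64-bit immediate
  int Reported = (ImmCost <= NumConstants * TCC_Basic) ? TCC_Free : ImmCost;
  std::printf("reported cost = %d\n", Reported);  // prints 2
  return 0;
}
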
4024
4025int X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4026 const APInt &Imm, Type *Ty,
4027 TTI::TargetCostKind CostKind) {
4028  assert(Ty->isIntegerTy());
4029
4030 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4031 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4032 // here, so that constant hoisting will ignore this constant.
4033 if (BitSize == 0)
4034 return TTI::TCC_Free;
4035
4036 switch (IID) {
4037 default:
4038 return TTI::TCC_Free;
4039 case Intrinsic::sadd_with_overflow:
4040 case Intrinsic::uadd_with_overflow:
4041 case Intrinsic::ssub_with_overflow:
4042 case Intrinsic::usub_with_overflow:
4043 case Intrinsic::smul_with_overflow:
4044 case Intrinsic::umul_with_overflow:
4045 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4046 return TTI::TCC_Free;
4047 break;
4048 case Intrinsic::experimental_stackmap:
4049 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4050 return TTI::TCC_Free;
4051 break;
4052 case Intrinsic::experimental_patchpoint_void:
4053 case Intrinsic::experimental_patchpoint_i64:
4054 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4055 return TTI::TCC_Free;
4056 break;
4057 }
4058 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4059}
4060
4061unsigned
4062X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
4063 if (CostKind != TTI::TCK_RecipThroughput)
4064 return Opcode == Instruction::PHI ? 0 : 1;
4065 // Branches are assumed to be predicted.
4066 return CostKind == TTI::TCK_RecipThroughput ? 0 : 1;
4067}
4068
4069int X86TTIImpl::getGatherOverhead() const {
4070 // Some CPUs have more overhead for gather. The specified overhead is relative
4071 // to the Load operation. "2" is the number provided by Intel architects. This
4072 // parameter is used for cost estimation of Gather Op and comparison with
4073 // other alternatives.
4074  // TODO: Remove the explicit hasAVX512()? That would mean we would only
4075  // enable gather with a -march.
4076 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4077 return 2;
4078
4079 return 1024;
4080}
4081
4082int X86TTIImpl::getScatterOverhead() const {
4083 if (ST->hasAVX512())
4084 return 2;
4085
4086 return 1024;
4087}
4088
4089// Return an average cost of Gather / Scatter instruction, maybe improved later.
4090// FIXME: Add TargetCostKind support.
4091int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, const Value *Ptr,
4092 Align Alignment, unsigned AddressSpace) {
4093
4094  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4095 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4096
4097  // Try to reduce the index size from 64 bits (the default for GEP)
4098  // to 32 bits. This is essential for VF 16: if the index can't be reduced to
4099  // 32 bits, the operation will use 16 x 64-bit indices, which do not fit in a
4100  // zmm and need to be split. Also check that the base pointer is the same for
4101  // all lanes, and that there's at most one variable index.
4102 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4103 unsigned IndexSize = DL.getPointerSizeInBits();
4104 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4105 if (IndexSize < 64 || !GEP)
4106 return IndexSize;
4107
4108 unsigned NumOfVarIndices = 0;
4109 const Value *Ptrs = GEP->getPointerOperand();
4110 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4111 return IndexSize;
4112 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4113 if (isa<Constant>(GEP->getOperand(i)))
4114 continue;
4115 Type *IndxTy = GEP->getOperand(i)->getType();
4116 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4117 IndxTy = IndexVTy->getElementType();
4118 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4119 !isa<SExtInst>(GEP->getOperand(i))) ||
4120 ++NumOfVarIndices > 1)
4121 return IndexSize; // 64
4122 }
4123 return (unsigned)32;
4124 };
4125
4126  // Try to reduce IndexSize to 32 bits for 16-element vectors.
4127  // By default the IndexSize is equal to the pointer size.
4128 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4129 ? getIndexSizeInBits(Ptr, DL)
4130 : DL.getPointerSizeInBits();
4131
4132 auto *IndexVTy = FixedVectorType::get(
4133 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4134 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
4135 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
4136 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
4137 if (SplitFactor > 1) {
4138 // Handle splitting of vector of pointers
4139 auto *SplitSrcTy =
4140 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4141 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4142 AddressSpace);
4143 }
4144
4145  // The gather / scatter cost is given by Intel architects. It is a rough
4146  // number since we are looking at one instruction at a time.
4147 const int GSOverhead = (Opcode == Instruction::Load)
4148 ? getGatherOverhead()
4149 : getScatterOverhead();
4150 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4151 MaybeAlign(Alignment), AddressSpace,
4152 TTI::TCK_RecipThroughput);
4153}
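
A standalone sketch of the split/recurse shape of the vector gather/scatter cost (illustrative numbers only; SplitFactor, GSOverhead and the scalar memory-op cost are assumptions, and the recursion is simplified to a single split level):

#include <cstdio>

// When legalization splits either the index or the data vector, the cost is
// SplitFactor times the cost of the narrower gather/scatter.
static int gsVectorCostSketch(unsigned VF, int SplitFactor, int GSOverhead,
                              int ScalarMemOpCost) {
  if (SplitFactor > 1)
    return SplitFactor *
           gsVectorCostSketch(VF / SplitFactor, 1, GSOverhead, ScalarMemOpCost);
  return GSOverhead + VF * ScalarMemOpCost;
}

int main() {
  // e.g. a 32-lane gather legalized into two 16-lane halves.
  std::printf("cost = %d\n", gsVectorCostSketch(32, 2, 2, 1)); // 2*(2+16) = 36
  return 0;
}
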
4154
4155/// Return the cost of full scalarization of gather / scatter operation.
4156///
4157/// Opcode - Load or Store instruction.
4158/// SrcVTy - The type of the data vector that should be gathered or scattered.
4159/// VariableMask - The mask is non-constant at compile time.
4160/// Alignment - Alignment for one element.
4161/// AddressSpace - pointer[s] address space.
4162///
4163/// FIXME: Add TargetCostKind support.
4164int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4165 bool VariableMask, Align Alignment,
4166 unsigned AddressSpace) {
4167 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4168 APInt DemandedElts = APInt::getAllOnesValue(VF);
4169 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4170
4171 int MaskUnpackCost = 0;
4172 if (VariableMask) {
4173 auto *MaskTy =
4174 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4175 MaskUnpackCost =
4176 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4177 int ScalarCompareCost = getCmpSelInstrCost(
4178 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4179 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4180 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4181 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4182 }
4183
4184 // The cost of the scalar loads/stores.
4185 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4186 MaybeAlign(Alignment), AddressSpace,
4187 CostKind);
4188
4189 int InsertExtractCost = 0;
4190 if (Opcode == Instruction::Load)
4191 for (unsigned i = 0; i < VF; ++i)
4192 // Add the cost of inserting each scalar load into the vector
4193 InsertExtractCost +=
4194 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4195 else
4196 for (unsigned i = 0; i < VF; ++i)
4197 // Add the cost of extracting each element out of the data vector
4198 InsertExtractCost +=
4199 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4200
4201 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4202}
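
Illustrative arithmetic for the scalarization above (all per-operation costs are assumed placeholders): the total is the per-lane loads or stores, plus the mask unpack (one compare and one branch per lane plus the mask extraction), plus the per-lane inserts or extracts.

#include <cstdio>

int main() {
  unsigned VF = 4;
  int MemOp = 1, Cmp = 1, Br = 0, Insert = 1, MaskExtract = 4;
  int MemoryOpCost = VF * MemOp;
  int MaskUnpackCost = MaskExtract + VF * (Br + Cmp);
  int InsertExtractCost = VF * Insert;
  std::printf("scalarized cost = %d\n",
              MemoryOpCost + MaskUnpackCost + InsertExtractCost); // 4 + 8 + 4
  return 0;
}
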
4203
4204/// Calculate the cost of Gather / Scatter operation
4205int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
4206 const Value *Ptr, bool VariableMask,
4207 Align Alignment,
4208 TTI::TargetCostKind CostKind,
4209 const Instruction *I = nullptr) {
4210 if (CostKind != TTI::TCK_RecipThroughput) {
4211 if ((Opcode == Instruction::Load &&
4212 isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4213 (Opcode == Instruction::Store &&
4214 isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4215 return 1;
4216 return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4217 Alignment, CostKind, I);
4218 }
4219
4220  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4221 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4222 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4223 if (!PtrTy && Ptr->getType()->isVectorTy())
4224 PtrTy = dyn_cast<PointerType>(
4225 cast<VectorType>(Ptr->getType())->getElementType());
4226  assert(PtrTy && "Unexpected type for Ptr argument");
4227 unsigned AddressSpace = PtrTy->getAddressSpace();
4228
4229 bool Scalarize = false;
4230 if ((Opcode == Instruction::Load &&
4231 !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4232 (Opcode == Instruction::Store &&
4233 !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4234 Scalarize = true;
4235  // Gather / scatter on 2-element vectors is not profitable on KNL / SKX.
4236  // A 4-element gather/scatter instruction does not exist on KNL. We could
4237  // extend it to 8 elements, but zeroing the upper bits of the mask vector
4238  // would add more instructions. Right now we give the scalar cost for
4239  // 4-element vectors on KNL. TODO: Check whether the gather/scatter
4240  // instruction is better in the VariableMask case.
4241 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
4242 Scalarize = true;
4243
4244 if (Scalarize)
4245 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4246 AddressSpace);
4247
4248 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4249}
4250
4251bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4252 TargetTransformInfo::LSRCost &C2) {
4253  // The X86-specific choice here is "instruction count first priority".
4254 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4255 C1.NumIVMuls, C1.NumBaseAdds,
4256 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4257 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4258 C2.NumIVMuls, C2.NumBaseAdds,
4259 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4260}
4261
4262bool X86TTIImpl::canMacroFuseCmp() {
4263 return ST->hasMacroFusion() || ST->hasBranchFusion();
4264}
4265
4266bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4267 if (!ST->hasAVX())
4268 return false;
4269
4270 // The backend can't handle a single element vector.
4271 if (isa<VectorType>(DataTy) &&
4272 cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4273 return false;
4274 Type *ScalarTy = DataTy->getScalarType();
4275
4276 if (ScalarTy->isPointerTy())
4277 return true;
4278
4279 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4280 return true;
4281
4282 if (!ScalarTy->isIntegerTy())
4283 return false;
4284
4285 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4286 return IntWidth == 32 || IntWidth == 64 ||
4287 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4288}
4289
4290bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4291 return isLegalMaskedLoad(DataType, Alignment);
4292}
4293
4294bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4295 unsigned DataSize = DL.getTypeStoreSize(DataType);
4296 // The only supported nontemporal loads are for aligned vectors of 16 or 32
4297 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
4298 // (the equivalent stores only require AVX).
4299 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4300 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
4301
4302 return false;
4303}
4304
4305bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4306 unsigned DataSize = DL.getTypeStoreSize(DataType);
4307
4308 // SSE4A supports nontemporal stores of float and double at arbitrary
4309 // alignment.
4310 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4311 return true;
4312
4313  // Besides the SSE4A subtarget exception above, only aligned stores are
4314  // available nontemporally on any other subtarget, and only stores with a
4315  // size of 4..32 bytes (powers of 2 only) are permitted.
4316 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4317 !isPowerOf2_32(DataSize))
4318 return false;
4319
4320 // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4321 // loads require AVX2).
4322 if (DataSize == 32)
4323 return ST->hasAVX();
4324 else if (DataSize == 16)
4325 return ST->hasSSE1();
4326 return true;
4327}
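
A standalone sketch of the nontemporal-store legality rules above (the feature flags and sizes are assumed inputs; this is not the LLVM API): SSE4A scalar FP stores are always allowed; otherwise the store must be an aligned power-of-two size of 4..32 bytes, with 32-byte stores additionally requiring AVX and 16-byte stores SSE1.

#include <cstdio>

static bool isLegalNTStoreSketch(unsigned DataSize, unsigned Alignment,
                                 bool IsScalarFP, bool HasSSE4A, bool HasAVX,
                                 bool HasSSE1) {
  if (HasSSE4A && IsScalarFP)
    return true;
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      (DataSize & (DataSize - 1)) != 0)
    return false;
  if (DataSize == 32)
    return HasAVX;
  if (DataSize == 16)
    return HasSSE1;
  return true;
}

int main() {
  // 32-byte aligned vector store on an assumed AVX subtarget: legal.
  std::printf("%d\n", isLegalNTStoreSketch(32, 32, false, false, true, true));
  // Same store but underaligned: not legal.
  std::printf("%d\n", isLegalNTStoreSketch(32, 16, false, false, true, true));
  return 0;
}
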
4328
4329bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4330 if (!isa<VectorType>(DataTy))
4331 return false;
4332
4333 if (!ST->hasAVX512())
4334 return false;
4335
4336 // The backend can't handle a single element vector.
4337 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4338 return false;
4339
4340 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4341
4342 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4343 return true;
4344
4345 if (!ScalarTy->isIntegerTy())
4346 return false;
4347
4348 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4349 return IntWidth == 32 || IntWidth == 64 ||
4350 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4351}
4352
4353bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4354 return isLegalMaskedExpandLoad(DataTy);
4355}
4356
4357bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4358 // Some CPUs have better gather performance than others.
4359  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4360  // enable gather with a -march.
4361 if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4362 return false;
4363
4364  // This function is currently called in two cases: from the Loop Vectorizer
4365  // and from the Scalarizer.
4366  // When the Loop Vectorizer asks about legality of the feature,
4367  // the vectorization factor is not calculated yet. The Loop Vectorizer
4368  // sends a scalar type and the decision is based on the width of the
4369  // scalar element.
4370  // Later on, the cost model will estimate usage of this intrinsic based on
4371  // the vector type.
4372  // The Scalarizer asks again about legality. It sends a vector type.
4373  // In this case we can reject non-power-of-2 vectors.
4374  // We also reject single-element vectors, as the type legalizer can't
4375  // scalarize them.
4376 if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4377 unsigned NumElts = DataVTy->getNumElements();
4378 if (NumElts == 1)
4379 return false;
4380 }
4381 Type *ScalarTy = DataTy->getScalarType();
4382 if (ScalarTy->isPointerTy())
4383 return true;
4384
4385 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4386 return true;
4387
4388 if (!ScalarTy->isIntegerTy())
4389 return false;
4390
4391 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4392 return IntWidth == 32 || IntWidth == 64;
4393}
4394
4395bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4396 // AVX2 doesn't support scatter
4397 if (!ST->hasAVX512())
4398 return false;
4399 return isLegalMaskedGather(DataType, Alignment);
4400}
4401
4402bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4403 EVT VT = TLI->getValueType(DL, DataType);
4404 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4405}
4406
4407bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4408 return false;
4409}
4410
4411bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4412 const Function *Callee) const {
4413 const TargetMachine &TM = getTLI()->getTargetMachine();
4414
4415  // Treat this as a subsetting of subtarget features.
4416 const FeatureBitset &CallerBits =
4417 TM.getSubtargetImpl(*Caller)->getFeatureBits();
4418 const FeatureBitset &CalleeBits =
4419 TM.getSubtargetImpl(*Callee)->getFeatureBits();
4420
4421 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4422 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4423 return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4424}
4425
4426bool X86TTIImpl::areFunctionArgsABICompatible(
4427 const Function *Caller, const Function *Callee,
4428 SmallPtrSetImpl<Argument *> &Args) const {
4429 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4430 return false;
4431
4432 // If we get here, we know the target features match. If one function
4433 // considers 512-bit vectors legal and the other does not, consider them
4434 // incompatible.
4435 const TargetMachine &TM = getTLI()->getTargetMachine();
4436
4437 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4438 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4439 return true;
4440
4441 // Consider the arguments compatible if they aren't vectors or aggregates.
4442 // FIXME: Look at the size of vectors.
4443 // FIXME: Look at the element types of aggregates to see if there are vectors.
4444 // FIXME: The API of this function seems intended to allow arguments
4445 // to be removed from the set, but the caller doesn't check if the set
4446 // becomes empty so that may not work in practice.
4447 return llvm::none_of(Args, [](Argument *A) {
4448 auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4449 return EltTy->isVectorTy() || EltTy->isAggregateType();
4450 });
4451}
4452
4453X86TTIImpl::TTI::MemCmpExpansionOptions
4454X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4455 TTI::MemCmpExpansionOptions Options;
4456 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4457 Options.NumLoadsPerBlock = 2;
4458 // All GPR and vector loads can be unaligned.
4459 Options.AllowOverlappingLoads = true;
4460 if (IsZeroCmp) {
4461    // Only enable vector loads for equality comparison. Right now the vector
4462    // version is not as fast for three-way compare (see #33329).
4463 const unsigned PreferredWidth = ST->getPreferVectorWidth();
4464 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4465 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4466 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4467 }
4468 if (ST->is64Bit()) {
4469 Options.LoadSizes.push_back(8);
4470 }
4471 Options.LoadSizes.push_back(4);
4472 Options.LoadSizes.push_back(2);
4473 Options.LoadSizes.push_back(1);
4474 return Options;
4475}
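
As a standalone sketch, this is the load-size list the logic above would build for a hypothetical 64-bit AVX2 target doing an equality (zero) compare; the feature flags and preferred vector width are assumptions, not queried from a real subtarget.

#include <cstdio>
#include <vector>

int main() {
  bool IsZeroCmp = true, HasAVX512 = false, HasAVX = true, HasSSE2 = true,
       Is64Bit = true;
  unsigned PreferredWidth = 256;
  std::vector<unsigned> LoadSizes;
  if (IsZeroCmp) {
    if (PreferredWidth >= 512 && HasAVX512) LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && HasAVX)    LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && HasSSE2)   LoadSizes.push_back(16);
  }
  if (Is64Bit)
    LoadSizes.push_back(8);
  LoadSizes.push_back(4);
  LoadSizes.push_back(2);
  LoadSizes.push_back(1);
  for (unsigned S : LoadSizes)
    std::printf("%u ", S);  // prints: 32 16 8 4 2 1
  std::printf("\n");
  return 0;
}
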
4476
4477bool X86TTIImpl::enableInterleavedAccessVectorization() {
4478 // TODO: We expect this to be beneficial regardless of arch,
4479 // but there are currently some unexplained performance artifacts on Atom.
4480 // As a temporary solution, disable on Atom.
4481 return !(ST->isAtom());
4482}
4483
4484 // Get an estimate for interleaved load/store operations for AVX2.
4485// \p Factor is the interleaved-access factor (stride) - number of
4486// (interleaved) elements in the group.
4487// \p Indices contains the indices for a strided load: when the
4488// interleaved load has gaps they indicate which elements are used.
4489// If Indices is empty (or if the number of indices is equal to the size
4490// of the interleaved-access as given in \p Factor) the access has no gaps.
4491//
4492// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
4493// computing the cost using a generic formula as a function of generic
4494// shuffles. We therefore use a lookup table instead, filled according to
4495// the instruction sequences that codegen currently generates.
4496int X86TTIImpl::getInterleavedMemoryOpCostAVX2(
4497 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4498 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4499 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4500
4501 if (UseMaskForCond || UseMaskForGaps)
4. Assuming 'UseMaskForCond' is false
5. Assuming 'UseMaskForGaps' is false
6. Taking false branch
4502 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4503 Alignment, AddressSpace, CostKind,
4504 UseMaskForCond, UseMaskForGaps);
4505
4506  // We currently support only fully-interleaved groups, with no gaps.
4507  // TODO: Also support strided loads (interleaved groups with gaps).
4508 if (Indices.size() && Indices.size() != Factor)
7. Assuming the condition is true
8. Assuming the condition is true
9. Taking true branch
4509 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
10. Calling 'BasicTTIImplBase::getInterleavedMemoryOpCost'
4510 Alignment, AddressSpace,
4511 CostKind);
4512
4513 // VecTy for interleave memop is <VF*Factor x Elt>.
4514 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4515 // VecTy = <12 x i32>.
4516 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4517
4518 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
4519 // the VF=2, while v2i128 is an unsupported MVT vector type
4520 // (see MachineValueType.h::getVectorVT()).
4521 if (!LegalVT.isVector())
4522 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4523 Alignment, AddressSpace,
4524 CostKind);
4525
4526 unsigned VF = VecTy->getNumElements() / Factor;
4527 Type *ScalarTy = VecTy->getElementType();
4528
4529 // Calculate the number of memory operations (NumOfMemOps), required
4530 // for load/store the VecTy.
4531 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4532 unsigned LegalVTSize = LegalVT.getStoreSize();
4533 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4534
4535 // Get the cost of one memory operation.
4536 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4537 LegalVT.getVectorNumElements());
4538 unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
4539 MaybeAlign(Alignment), AddressSpace,
4540 CostKind);
4541
4542 auto *VT = FixedVectorType::get(ScalarTy, VF);
4543 EVT ETy = TLI->getValueType(DL, VT);
4544 if (!ETy.isSimple())
4545 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4546 Alignment, AddressSpace,
4547 CostKind);
4548
4549  // TODO: Complete for other data types and strides.
4550  // Each combination of Stride, ElementTy and VF results in a different
4551  // sequence; the cost tables are therefore accessed with
4552  // Factor (stride) and VectorType = VF x ElemType.
4553  // The cost accounts only for the shuffle sequence;
4554  // the cost of the loads/stores is accounted for separately.
4555 //
4556 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
4557 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
4558 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64
4559
4560 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8
4561 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8
4562 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8
4563 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8
4564 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8
4565 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32
4566
4567 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8
4568 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8
4569 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8
4570 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8
4571 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
4572
4573 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32
4574 };
4575
4576 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
4577 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
4578 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store)
4579
4580 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store)
4581 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store)
4582 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store)
4583 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
4584 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
4585
4586 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store)
4587 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store)
4588 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store)
4589 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
4590 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store)
4591 };
4592
4593 if (Opcode == Instruction::Load) {
4594 if (const auto *Entry =
4595 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
4596 return NumOfMemOps * MemOpCost + Entry->Cost;
4597 } else {
4598    assert(Opcode == Instruction::Store &&
4599           "Expected Store Instruction at this point");
4600 if (const auto *Entry =
4601 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
4602 return NumOfMemOps * MemOpCost + Entry->Cost;
4603 }
4604
4605 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4606 Alignment, AddressSpace, CostKind);
4607}
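
Illustrative arithmetic only for the AVX2 path above: for a hypothetical stride-3 load of <24 x i8> (VF = 8), the table entry {3, MVT::v8i8, 9} supplies the shuffle cost and the memory part is NumOfMemOps * MemOpCost; the concrete MemOpCost here is an assumption.

#include <cstdio>

int main() {
  unsigned VecTySize = 24, LegalVTSize = 16;  // <24 x i8> vs. a legal v16i8
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;  // 2
  unsigned MemOpCost = 1, TableCost = 9;      // {3, MVT::v8i8, 9}
  std::printf("cost = %u\n", NumOfMemOps * MemOpCost + TableCost);  // 11
  return 0;
}
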
4608
4609 // Get an estimate for interleaved load/store operations and strided loads.
4610 // \p Indices contains the indices for a strided load.
4611 // \p Factor - the factor of interleaving.
4612 // AVX-512 provides 3-src shuffles that significantly reduce the cost.
4613int X86TTIImpl::getInterleavedMemoryOpCostAVX512(
4614 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4615 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4616 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4617
4618 if (UseMaskForCond || UseMaskForGaps)
4619 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4620 Alignment, AddressSpace, CostKind,
4621 UseMaskForCond, UseMaskForGaps);
4622
4623 // VecTy for interleave memop is <VF*Factor x Elt>.
4624 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4625 // VecTy = <12 x i32>.
4626
4627 // Calculate the number of memory operations (NumOfMemOps), required
4628 // for load/store the VecTy.
4629 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4630 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4631 unsigned LegalVTSize = LegalVT.getStoreSize();
4632 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4633
4634 // Get the cost of one memory operation.
4635 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4636 LegalVT.getVectorNumElements());
4637 unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
4638 MaybeAlign(Alignment), AddressSpace,
4639 CostKind);
4640
4641 unsigned VF = VecTy->getNumElements() / Factor;
4642 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
4643
4644 if (Opcode == Instruction::Load) {
4645 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
4646 // contain the cost of the optimized shuffle sequence that the
4647 // X86InterleavedAccess pass will generate.
4648 // The cost of loads and stores are computed separately from the table.
4649
4650    // X86InterleavedAccess supports only the following interleaved-access groups.
4651 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
4652 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
4653 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
4654        {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
4655 };
4656
4657 if (const auto *Entry =
4658 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
4659 return NumOfMemOps * MemOpCost + Entry->Cost;
4660    // If an entry does not exist, fall back to the default implementation.
4661
4662 // Kind of shuffle depends on number of loaded values.
4663 // If we load the entire data in one register, we can use a 1-src shuffle.
4664 // Otherwise, we'll merge 2 sources in each operation.
4665 TTI::ShuffleKind ShuffleKind =
4666 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
4667
4668 unsigned ShuffleCost =
4669 getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
4670
4671 unsigned NumOfLoadsInInterleaveGrp =
4672 Indices.size() ? Indices.size() : Factor;
4673 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
4674 VecTy->getNumElements() / Factor);
4675 unsigned NumOfResults =
4676 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
4677 NumOfLoadsInInterleaveGrp;
4678
4679    // About half of the loads may be folded into shuffles when we have only
4680    // one result. If we have more than one result, we do not fold loads at all.
4681 unsigned NumOfUnfoldedLoads =
4682 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
4683
4684 // Get a number of shuffle operations per result.
4685 unsigned NumOfShufflesPerResult =
4686 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
4687
4688    // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
4689    // When we have more than one destination, we need additional instructions
4690    // to keep the sources.
4691 unsigned NumOfMoves = 0;
4692 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
4693 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
4694
4695 int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
4696 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
4697
4698 return Cost;
4699 }
4700
4701 // Store.
4702  assert(Opcode == Instruction::Store &&
4703         "Expected Store Instruction at this point");
4704  // X86InterleavedAccess supports only the following interleaved-access groups.
4705 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
4706 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
4707 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
4708 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
4709
4710 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
4711 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
4712 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
4713 {4, MVT::v64i8, 24} // interleave 4 x 32i8 into 256i8 (and store)
4714 };
4715
4716 if (const auto *Entry =
4717 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
4718 return NumOfMemOps * MemOpCost + Entry->Cost;
4719  // If an entry does not exist, fall back to the default implementation.
4720
4721  // There are no strided stores at the moment, and a store can't be folded
4722  // into a shuffle.
4723 unsigned NumOfSources = Factor; // The number of values to be merged.
4724 unsigned ShuffleCost =
4725 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
4726 unsigned NumOfShufflesPerStore = NumOfSources - 1;
4727
4728  // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
4729  // We need additional instructions to keep the sources.
4730 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
4731 int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
4732 NumOfMoves;
4733 return Cost;
4734}
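
Illustrative arithmetic only for the store fallback path above (all unit costs are assumptions): each legalized store is preceded by Factor - 1 two-source shuffles, plus the moves needed to preserve clobbered sources.

#include <cstdio>

int main() {
  unsigned NumOfMemOps = 2, Factor = 3;
  unsigned MemOpCost = 1, ShuffleCost = 1;
  unsigned NumOfShufflesPerStore = Factor - 1;                    // 2
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;  // 2
  unsigned Cost =
      NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
      NumOfMoves;
  std::printf("cost = %u\n", Cost);  // 2 * (1 + 2) + 2 = 8
  return 0;
}
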
4735
4736int X86TTIImpl::getInterleavedMemoryOpCost(
4737 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
4738 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
4739 bool UseMaskForCond, bool UseMaskForGaps) {
4740 auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
4741 Type *EltTy = cast<VectorType>(VecTy)->getElementType();
4742 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
4743 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
4744 return true;
4745 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
4746 return HasBW;
4747 return false;
4748 };
4749 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
4750 return getInterleavedMemoryOpCostAVX512(
4751 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4752 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4753 if (ST->hasAVX2())
1. Taking true branch
4754 return getInterleavedMemoryOpCostAVX2(
3. Calling 'X86TTIImpl::getInterleavedMemoryOpCostAVX2'
4755 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
2. 'VecTy' is a 'FixedVectorType'
4756 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4757
4758 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4759 Alignment, AddressSpace, CostKind,
4760 UseMaskForCond, UseMaskForGaps);
4761}

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/TargetTransformInfo.h"
26#include "llvm/Analysis/TargetTransformInfoImpl.h"
27#include "llvm/CodeGen/ISDOpcodes.h"
28#include "llvm/CodeGen/TargetLowering.h"
29#include "llvm/CodeGen/TargetSubtargetInfo.h"
30#include "llvm/CodeGen/ValueTypes.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/IR/Type.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/CommandLine.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MachineValueType.h"
47#include "llvm/Support/MathExtras.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <limits>
52#include <utility>
53
54namespace llvm {
55
56class Function;
57class GlobalValue;
58class LLVMContext;
59class ScalarEvolution;
60class SCEV;
61class TargetMachine;
62
63extern cl::opt<unsigned> PartialUnrollingThreshold;
64
65/// Base class which can be used to help build a TTI implementation.
66///
67/// This class provides as much implementation of the TTI interface as is
68/// possible using the target independent parts of the code generator.
69///
70/// In order to subclass it, your class must implement a getST() method to
71/// return the subtarget, and a getTLI() method to return the target lowering.
72/// We need these methods implemented in the derived class so that this class
73/// doesn't have to duplicate storage for them.
74template <typename T>
75class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
76private:
77 using BaseT = TargetTransformInfoImplCRTPBase<T>;
78 using TTI = TargetTransformInfo;
79
80 /// Helper function to access this as a T.
81 T *thisT() { return static_cast<T *>(this); }
82
83 /// Estimate a cost of Broadcast as an extract and sequence of insert
84 /// operations.
85 unsigned getBroadcastShuffleOverhead(FixedVectorType *VTy) {
86 unsigned Cost = 0;
87    // Broadcast cost is equal to the cost of extracting the zeroth element
88    // plus the cost of inserting it into every element of the result vector.
89 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);
90
91 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
92 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
93 }
94 return Cost;
95 }
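
A standalone sketch of the broadcast overhead model above (unit costs assumed): one extract of element 0 plus one insert per destination lane.

#include <cstdio>

int main() {
  unsigned NumElts = 8, ExtractCost = 1, InsertCost = 1;
  unsigned Cost = ExtractCost + NumElts * InsertCost;
  std::printf("broadcast overhead = %u\n", Cost);  // prints 9
  return 0;
}
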
96
97 /// Estimate a cost of shuffle as a sequence of extract and insert
98 /// operations.
99 unsigned getPermuteShuffleOverhead(FixedVectorType *VTy) {
100 unsigned Cost = 0;
101    // Shuffle cost is equal to the cost of extracting elements from the arguments
102    // plus the cost of inserting them into the result vector.
103
104    // e.g. <4 x float> has a mask of <0,5,2,7>, i.e. we need to extract from
105    // index 0 of the first vector, index 1 of the second vector, index 2 of the
106    // first vector and finally index 3 of the second vector, and insert them at
107    // indices <0,1,2,3> of the result vector.
108 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
109 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
110 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
111 }
112 return Cost;
113 }
114
115 /// Estimate a cost of subvector extraction as a sequence of extract and
116 /// insert operations.
117 unsigned getExtractSubvectorOverhead(VectorType *VTy, int Index,
118 FixedVectorType *SubVTy) {
119    assert(VTy && SubVTy &&
120           "Can only extract subvectors from vectors");
121 int NumSubElts = SubVTy->getNumElements();
122    assert((!isa<FixedVectorType>(VTy) ||
123            (Index + NumSubElts) <=
124                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
125           "SK_ExtractSubvector index out of range");
126
127 unsigned Cost = 0;
128    // Subvector extraction cost is equal to the cost of extracting elements from
129    // the source type plus the cost of inserting them into the result vector
130    // type.
131 for (int i = 0; i != NumSubElts; ++i) {
132 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
133 i + Index);
134 Cost +=
135 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
136 }
137 return Cost;
138 }
139
140 /// Estimate a cost of subvector insertion as a sequence of extract and
141 /// insert operations.
142 unsigned getInsertSubvectorOverhead(VectorType *VTy, int Index,
143 FixedVectorType *SubVTy) {
144    assert(VTy && SubVTy &&
145           "Can only insert subvectors into vectors");
146 int NumSubElts = SubVTy->getNumElements();
147    assert((!isa<FixedVectorType>(VTy) ||
148            (Index + NumSubElts) <=
149                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
150           "SK_InsertSubvector index out of range");
151
152 unsigned Cost = 0;
153    // Subvector insertion cost is equal to the cost of extracting elements from
154    // the source type plus the cost of inserting them into the result vector
155    // type.
156 for (int i = 0; i != NumSubElts; ++i) {
157 Cost +=
158 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
159 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
160 i + Index);
161 }
162 return Cost;
163 }
164
165 /// Local query method delegates up to T which *must* implement this!
166 const TargetSubtargetInfo *getST() const {
167 return static_cast<const T *>(this)->getST();
168 }
169
170 /// Local query method delegates up to T which *must* implement this!
171 const TargetLoweringBase *getTLI() const {
172 return static_cast<const T *>(this)->getTLI();
173 }
174
175 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
176 switch (M) {
177 case TTI::MIM_Unindexed:
178 return ISD::UNINDEXED;
179 case TTI::MIM_PreInc:
180 return ISD::PRE_INC;
181 case TTI::MIM_PreDec:
182 return ISD::PRE_DEC;
183 case TTI::MIM_PostInc:
184 return ISD::POST_INC;
185 case TTI::MIM_PostDec:
186 return ISD::POST_DEC;
187 }
188    llvm_unreachable("Unexpected MemIndexedMode");
189 }
190
191protected:
192 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
193 : BaseT(DL) {}
194 virtual ~BasicTTIImplBase() = default;
195
196 using TargetTransformInfoImplBase::DL;
197
198public:
199 /// \name Scalar TTI Implementations
200 /// @{
201 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
202 unsigned AddressSpace, unsigned Alignment,
203 bool *Fast) const {
204 EVT E = EVT::getIntegerVT(Context, BitWidth);
205 return getTLI()->allowsMisalignedMemoryAccesses(
206 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
207 }
208
209 bool hasBranchDivergence() { return false; }
210
211 bool useGPUDivergenceAnalysis() { return false; }
212
213 bool isSourceOfDivergence(const Value *V) { return false; }
214
215 bool isAlwaysUniform(const Value *V) { return false; }
216
217 unsigned getFlatAddressSpace() {
218 // Return an invalid address space.
219 return -1;
220 }
221
222 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
223 Intrinsic::ID IID) const {
224 return false;
225 }
226
227 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
228 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
229 }
230
231 unsigned getAssumedAddrSpace(const Value *V) const {
232 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
233 }
234
235 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
236 Value *NewV) const {
237 return nullptr;
238 }
239
240 bool isLegalAddImmediate(int64_t imm) {
241 return getTLI()->isLegalAddImmediate(imm);
242 }
243
244 bool isLegalICmpImmediate(int64_t imm) {
245 return getTLI()->isLegalICmpImmediate(imm);
246 }
247
248 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
249 bool HasBaseReg, int64_t Scale,
250 unsigned AddrSpace, Instruction *I = nullptr) {
251 TargetLoweringBase::AddrMode AM;
252 AM.BaseGV = BaseGV;
253 AM.BaseOffs = BaseOffset;
254 AM.HasBaseReg = HasBaseReg;
255 AM.Scale = Scale;
256 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
257 }
258
259 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
260 const DataLayout &DL) const {
261 EVT VT = getTLI()->getValueType(DL, Ty);
262 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
263 }
264
265 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
266 const DataLayout &DL) const {
267 EVT VT = getTLI()->getValueType(DL, Ty);
268 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
269 }
270
271 bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
272 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
273 }
274
275 bool isNumRegsMajorCostOfLSR() {
276 return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
277 }
278
279 bool isProfitableLSRChainElement(Instruction *I) {
280 return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
281 }
282
283 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
284 bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
285 TargetLoweringBase::AddrMode AM;
286 AM.BaseGV = BaseGV;
287 AM.BaseOffs = BaseOffset;
288 AM.HasBaseReg = HasBaseReg;
289 AM.Scale = Scale;
290 return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
291 }
292
293 bool isTruncateFree(Type *Ty1, Type *Ty2) {
294 return getTLI()->isTruncateFree(Ty1, Ty2);
295 }
296
297 bool isProfitableToHoist(Instruction *I) {
298 return getTLI()->isProfitableToHoist(I);
299 }
300
301 bool useAA() const { return getST()->useAA(); }
302
303 bool isTypeLegal(Type *Ty) {
304 EVT VT = getTLI()->getValueType(DL, Ty);
305 return getTLI()->isTypeLegal(VT);
306 }
307
308 unsigned getRegUsageForType(Type *Ty) {
309 return getTLI()->getTypeLegalizationCost(DL, Ty).first;
310 }
311
312 int getGEPCost(Type *PointeeType, const Value *Ptr,
313 ArrayRef<const Value *> Operands) {
314 return BaseT::getGEPCost(PointeeType, Ptr, Operands);
315 }
316
317 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
318 unsigned &JumpTableSize,
319 ProfileSummaryInfo *PSI,
320 BlockFrequencyInfo *BFI) {
321 /// Try to find the estimated number of clusters. Note that the number of
322 /// clusters identified in this function could be different from the actual
323  /// numbers found in lowering. This function ignores switches that are
324 /// lowered with a mix of jump table / bit test / BTree. This function was
325 /// initially intended to be used when estimating the cost of switch in
326 /// inline cost heuristic, but it's a generic cost model to be used in other
327 /// places (e.g., in loop unrolling).
328 unsigned N = SI.getNumCases();
329 const TargetLoweringBase *TLI = getTLI();
330 const DataLayout &DL = this->getDataLayout();
331
332 JumpTableSize = 0;
333 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
334
335 // Early exit if both a jump table and bit test are not allowed.
336 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
337 return N;
338
339 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
340 APInt MinCaseVal = MaxCaseVal;
341 for (auto CI : SI.cases()) {
342 const APInt &CaseVal = CI.getCaseValue()->getValue();
343 if (CaseVal.sgt(MaxCaseVal))
344 MaxCaseVal = CaseVal;
345 if (CaseVal.slt(MinCaseVal))
346 MinCaseVal = CaseVal;
347 }
348
349 // Check if suitable for a bit test
350 if (N <= DL.getIndexSizeInBits(0u)) {
351 SmallPtrSet<const BasicBlock *, 4> Dests;
352 for (auto I : SI.cases())
353 Dests.insert(I.getCaseSuccessor());
354
355 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
356 DL))
357 return 1;
358 }
359
360 // Check if suitable for a jump table.
361 if (IsJTAllowed) {
362 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
363 return N;
364 uint64_t Range =
365 (MaxCaseVal - MinCaseVal)
366 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
367 // Check whether a range of clusters is dense enough for a jump table
368 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
369 JumpTableSize = Range;
370 return 1;
371 }
372 }
373 return N;
374 }
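
The jump-table branch above reduces to simple arithmetic: the case range is (MaxCaseVal - MinCaseVal) + 1, clamped so the increment cannot overflow, and the target then decides whether N cases over that range are dense enough. A minimal standalone sketch of the same arithmetic, where MinJumpTableEntries and the 40% density cutoff are illustrative assumptions standing in for the TLI->isSuitableForJumpTable hook:

    #include <cstdint>

    // Sketch only: mirrors the Range computation with plain integers.
    unsigned estimateClusters(unsigned N, uint64_t MinCase, uint64_t MaxCase,
                              unsigned MinJumpTableEntries) {
      if (N < 2 || N < MinJumpTableEntries)
        return N;                                   // no jump table: N clusters
      uint64_t Diff = MaxCase - MinCase;            // clamp like getLimitedValue()
      uint64_t Range = (Diff > UINT64_MAX - 1 ? UINT64_MAX - 1 : Diff) + 1;
      double Density = static_cast<double>(N) / static_cast<double>(Range);
      return Density >= 0.4 ? 1 : N;                // dense enough => one cluster
    }
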
375
376 bool shouldBuildLookupTables() {
377 const TargetLoweringBase *TLI = getTLI();
378 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
379 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
380 }
381
382 bool haveFastSqrt(Type *Ty) {
383 const TargetLoweringBase *TLI = getTLI();
384 EVT VT = TLI->getValueType(DL, Ty);
385 return TLI->isTypeLegal(VT) &&
386 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
387 }
388
389 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
390 return true;
391 }
392
393 unsigned getFPOpCost(Type *Ty) {
394 // Check whether FADD is available, as a proxy for floating-point in
395 // general.
396 const TargetLoweringBase *TLI = getTLI();
397 EVT VT = TLI->getValueType(DL, Ty);
398 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
399 return TargetTransformInfo::TCC_Basic;
400 return TargetTransformInfo::TCC_Expensive;
401 }
402
403 unsigned getInliningThresholdMultiplier() { return 1; }
404 unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
405
406 int getInlinerVectorBonusPercent() { return 150; }
407
408 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
409 TTI::UnrollingPreferences &UP) {
410 // This unrolling functionality is target independent, but to provide some
411 // motivation for its intended use, for x86:
412
413 // According to the Intel 64 and IA-32 Architectures Optimization Reference
414 // Manual, Intel Core models and later have a loop stream detector (and
415 // associated uop queue) that can benefit from partial unrolling.
416 // The relevant requirements are:
417 // - The loop must have no more than 4 (8 for Nehalem and later) branches
418 // taken, and none of them may be calls.
419 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
420
421 // According to the Software Optimization Guide for AMD Family 15h
422 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
423 // and loop buffer which can benefit from partial unrolling.
424 // The relevant requirements are:
425 // - The loop must have fewer than 16 branches
426 // - The loop must have less than 40 uops in all executed loop branches
427
428 // The number of taken branches in a loop is hard to estimate here, and
429 // benchmarking has revealed that it is better not to be conservative when
430 // estimating the branch count. As a result, we'll ignore the branch limits
431 // until someone finds a case where it matters in practice.
432
433 unsigned MaxOps;
434 const TargetSubtargetInfo *ST = getST();
435 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
436 MaxOps = PartialUnrollingThreshold;
437 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
438 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
439 else
440 return;
441
442 // Scan the loop: don't unroll loops with calls.
443 for (BasicBlock *BB : L->blocks()) {
444 for (Instruction &I : *BB) {
445 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
446 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
447 if (!thisT()->isLoweredToCall(F))
448 continue;
449 }
450
451 return;
452 }
453 }
454 }
455
456 // Enable runtime and partial unrolling up to the specified size.
457 // Enable using trip count upper bound to unroll loops.
458 UP.Partial = UP.Runtime = UP.UpperBound = true;
459 UP.PartialThreshold = MaxOps;
460
461 // Avoid unrolling when optimizing for size.
462 UP.OptSizeThreshold = 0;
463 UP.PartialOptSizeThreshold = 0;
464
465     // Set the number of instructions optimized when the "back edge"
466     // becomes a "fall through" to the default value of 2.
467 UP.BEInsns = 2;
468 }
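
The only target-specific input to the unrolling preferences above is where MaxOps comes from; the rest is fixed policy. A compressed sketch of that selection, assuming the command-line threshold (PartialUnrollingThreshold) and the scheduling model's LoopMicroOpBufferSize are passed in directly:

    // Sketch only: returns 0 when partial unrolling should stay disabled.
    unsigned pickPartialThreshold(bool CmdLineSet, unsigned CmdLineValue,
                                  unsigned LoopMicroOpBufferSize) {
      if (CmdLineSet)
        return CmdLineValue;           // explicit command-line threshold wins
      return LoopMicroOpBufferSize;    // 0 means "no loop buffer, don't unroll"
    }
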
469
470 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
471 TTI::PeelingPreferences &PP) {
472 PP.PeelCount = 0;
473 PP.AllowPeeling = true;
474 PP.AllowLoopNestsPeeling = false;
475 PP.PeelProfiledIterations = true;
476 }
477
478 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
479 AssumptionCache &AC,
480 TargetLibraryInfo *LibInfo,
481 HardwareLoopInfo &HWLoopInfo) {
482 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
483 }
484
485 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
486 AssumptionCache &AC, TargetLibraryInfo *TLI,
487 DominatorTree *DT,
488 const LoopAccessInfo *LAI) {
489 return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
490 }
491
492 bool emitGetActiveLaneMask() {
493 return BaseT::emitGetActiveLaneMask();
494 }
495
496 Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
497 IntrinsicInst &II) {
498 return BaseT::instCombineIntrinsic(IC, II);
499 }
500
501 Optional<Value *> simplifyDemandedUseBitsIntrinsic(InstCombiner &IC,
502 IntrinsicInst &II,
503 APInt DemandedMask,
504 KnownBits &Known,
505 bool &KnownBitsComputed) {
506 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
507 KnownBitsComputed);
508 }
509
510 Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
511 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
512 APInt &UndefElts2, APInt &UndefElts3,
513 std::function<void(Instruction *, unsigned, APInt, APInt &)>
514 SimplifyAndSetOp) {
515 return BaseT::simplifyDemandedVectorEltsIntrinsic(
516 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
517 SimplifyAndSetOp);
518 }
519
520 int getInstructionLatency(const Instruction *I) {
521 if (isa<LoadInst>(I))
522 return getST()->getSchedModel().DefaultLoadLatency;
523
524 return BaseT::getInstructionLatency(I);
525 }
526
527 virtual Optional<unsigned>
528 getCacheSize(TargetTransformInfo::CacheLevel Level) const {
529 return Optional<unsigned>(
530 getST()->getCacheSize(static_cast<unsigned>(Level)));
531 }
532
533 virtual Optional<unsigned>
534 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
535 Optional<unsigned> TargetResult =
536 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
537
538 if (TargetResult)
539 return TargetResult;
540
541 return BaseT::getCacheAssociativity(Level);
542 }
543
544 virtual unsigned getCacheLineSize() const {
545 return getST()->getCacheLineSize();
546 }
547
548 virtual unsigned getPrefetchDistance() const {
549 return getST()->getPrefetchDistance();
550 }
551
552 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
553 unsigned NumStridedMemAccesses,
554 unsigned NumPrefetches,
555 bool HasCall) const {
556 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
557 NumPrefetches, HasCall);
558 }
559
560 virtual unsigned getMaxPrefetchIterationsAhead() const {
561 return getST()->getMaxPrefetchIterationsAhead();
562 }
563
564 virtual bool enableWritePrefetching() const {
565 return getST()->enableWritePrefetching();
566 }
567
568 /// @}
569
570 /// \name Vector TTI Implementations
571 /// @{
572
573 unsigned getRegisterBitWidth(bool Vector) const { return 32; }
574
575 Optional<unsigned> getMaxVScale() const { return None; }
576
577 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
578 /// are set if the demanded result elements need to be inserted and/or
579 /// extracted from vectors.
580 unsigned getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts,
581 bool Insert, bool Extract) {
582 /// FIXME: a bitfield is not a reasonable abstraction for talking about
583 /// which elements are needed from a scalable vector
584 auto *Ty = cast<FixedVectorType>(InTy);
585
586     assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
587            "Vector size mismatch");
588
589 unsigned Cost = 0;
590
591 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
592 if (!DemandedElts[i])
593 continue;
594 if (Insert)
595 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, i);
596 if (Extract)
597 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
598 }
599
600 return Cost;
601 }
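
The loop above charges one insert and/or one extract per demanded lane, nothing more. A standalone sketch using a plain 64-bit mask instead of APInt, with a flat per-lane cost of 1 as an assumption (the real code asks getVectorInstrCost for each index):

    #include <cstdint>

    // Sketch only: bit i of DemandedMask set => lane i is needed (NumElts <= 64).
    unsigned scalarizationOverhead(unsigned NumElts, uint64_t DemandedMask,
                                   bool Insert, bool Extract) {
      unsigned Cost = 0;
      for (unsigned i = 0; i < NumElts; ++i) {
        if (!(DemandedMask & (1ULL << i)))
          continue;                    // lane not demanded, no cost
        if (Insert)
          Cost += 1;                   // insertelement into the result vector
        if (Extract)
          Cost += 1;                   // extractelement from the source vector
      }
      return Cost;
    }
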
602
603 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
604 unsigned getScalarizationOverhead(VectorType *InTy, bool Insert,
605 bool Extract) {
606 auto *Ty = cast<FixedVectorType>(InTy);
607
608 APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements());
609 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
610 }
611
612 /// Estimate the overhead of scalarizing an instruction's unique
613 /// non-constant operands. The types of the arguments are ordinarily
614 /// scalar, in which case the costs are multiplied with VF.
615 unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
616 unsigned VF) {
617 unsigned Cost = 0;
618 SmallPtrSet<const Value*, 4> UniqueOperands;
619 for (const Value *A : Args) {
620 // Disregard things like metadata arguments.
621 Type *Ty = A->getType();
622 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
623 !Ty->isPtrOrPtrVectorTy())
624 continue;
625
626 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
627 auto *VecTy = dyn_cast<VectorType>(Ty);
628 if (VecTy) {
629 // If A is a vector operand, VF should be 1 or correspond to A.
630         assert((VF == 1 ||
631                 VF == cast<FixedVectorType>(VecTy)->getNumElements()) &&
632                "Vector argument does not match VF");
633 }
634 else
635 VecTy = FixedVectorType::get(Ty, VF);
636
637 Cost += getScalarizationOverhead(VecTy, false, true);
638 }
639 }
640
641 return Cost;
642 }
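
getOperandsScalarizationOverhead above charges the extract-side overhead once per unique, non-constant operand, widening scalar operands to VF lanes first. A rough sketch of that bookkeeping, with the per-operand cost simplified to one unit per lane (an assumption):

    #include <set>
    #include <string>
    #include <vector>

    struct OperandInfo {
      std::string Name;     // stands in for the operand's identity
      bool IsConstant;
      unsigned NumLanes;    // 0 => scalar operand
    };

    // Sketch only: each unique, non-constant operand pays one extract per lane.
    unsigned operandsOverhead(const std::vector<OperandInfo> &Args, unsigned VF) {
      std::set<std::string> Seen;
      unsigned Cost = 0;
      for (const OperandInfo &A : Args) {
        if (A.IsConstant || !Seen.insert(A.Name).second)
          continue;                                    // constants and repeats are free
        unsigned Lanes = A.NumLanes ? A.NumLanes : VF; // scalar operand => VF lanes
        Cost += Lanes;                                 // assumed extract cost of 1/lane
      }
      return Cost;
    }
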
643
644 unsigned getScalarizationOverhead(VectorType *InTy,
645 ArrayRef<const Value *> Args) {
646 auto *Ty = cast<FixedVectorType>(InTy);
647
648 unsigned Cost = 0;
649
650 Cost += getScalarizationOverhead(Ty, true, false);
651 if (!Args.empty())
652 Cost += getOperandsScalarizationOverhead(Args, Ty->getNumElements());
653 else
654 // When no information on arguments is provided, we add the cost
655 // associated with one argument as a heuristic.
656 Cost += getScalarizationOverhead(Ty, false, true);
657
658 return Cost;
659 }
660
661 unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
662
663 unsigned getArithmeticInstrCost(
664 unsigned Opcode, Type *Ty,
665 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
666 TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
667 TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
668 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
669 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
670 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
671 const Instruction *CxtI = nullptr) {
672 // Check if any of the operands are vector operands.
673 const TargetLoweringBase *TLI = getTLI();
674 int ISD = TLI->InstructionOpcodeToISD(Opcode);
675 assert(ISD && "Invalid opcode")((ISD && "Invalid opcode") ? static_cast<void> (
0) : __assert_fail ("ISD && \"Invalid opcode\"", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 675, __PRETTY_FUNCTION__))
;
676
677 // TODO: Handle more cost kinds.
678 if (CostKind != TTI::TCK_RecipThroughput)
679 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
680 Opd1Info, Opd2Info,
681 Opd1PropInfo, Opd2PropInfo,
682 Args, CxtI);
683
684 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
685
686 bool IsFloat = Ty->isFPOrFPVectorTy();
687 // Assume that floating point arithmetic operations cost twice as much as
688 // integer operations.
689 unsigned OpCost = (IsFloat ? 2 : 1);
690
691 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
692 // The operation is legal. Assume it costs 1.
693 // TODO: Once we have extract/insert subvector cost we need to use them.
694 return LT.first * OpCost;
695 }
696
697 if (!TLI->isOperationExpand(ISD, LT.second)) {
698 // If the operation is custom lowered, then assume that the code is twice
699 // as expensive.
700 return LT.first * 2 * OpCost;
701 }
702
703 // Else, assume that we need to scalarize this op.
704 // TODO: If one of the types get legalized by splitting, handle this
705 // similarly to what getCastInstrCost() does.
706 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
707 unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
708 unsigned Cost = thisT()->getArithmeticInstrCost(
709 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
710 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
711       // Return the cost of multiple scalar invocations plus the cost of
712 // inserting and extracting the values.
713 return getScalarizationOverhead(VTy, Args) + Num * Cost;
714 }
715
716 // We don't know anything about this scalar instruction.
717 return OpCost;
718 }
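
Once the type is legalized into LT.first parts, the arithmetic cost above falls into three tiers: legal or promoted operations cost LT.first * OpCost, custom-lowered operations are assumed to be twice that, and everything else is scalarized (per-element cost plus the scalarization overhead). A worked sketch of the tiering, with the enum standing in for the TLI legality query:

    enum class LowerAction { Legal, Custom, Expand };

    // Sketch only: NumParts is LT.first; OpCost is 2 for FP, 1 for integer.
    unsigned arithmeticCost(LowerAction A, unsigned NumParts, bool IsFloat,
                            unsigned NumElts, unsigned ScalarCost,
                            unsigned ScalarizationOverhead) {
      unsigned OpCost = IsFloat ? 2 : 1;
      switch (A) {
      case LowerAction::Legal:
        return NumParts * OpCost;           // e.g. an FP op split into 2 parts: 2*2 = 4
      case LowerAction::Custom:
        return NumParts * 2 * OpCost;       // custom lowering assumed twice as expensive
      case LowerAction::Expand:
        return ScalarizationOverhead + NumElts * ScalarCost;
      }
      return OpCost;                        // unknown scalar instruction
    }
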
719
720 unsigned getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, int Index,
721 VectorType *SubTp) {
722
723 switch (Kind) {
724 case TTI::SK_Broadcast:
725 return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp));
726 case TTI::SK_Select:
727 case TTI::SK_Reverse:
728 case TTI::SK_Transpose:
729 case TTI::SK_PermuteSingleSrc:
730 case TTI::SK_PermuteTwoSrc:
731 return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp));
732 case TTI::SK_ExtractSubvector:
733 return getExtractSubvectorOverhead(Tp, Index,
734 cast<FixedVectorType>(SubTp));
735 case TTI::SK_InsertSubvector:
736 return getInsertSubvectorOverhead(Tp, Index,
737 cast<FixedVectorType>(SubTp));
738 }
739 llvm_unreachable("Unknown TTI::ShuffleKind")::llvm::llvm_unreachable_internal("Unknown TTI::ShuffleKind",
"/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 739)
;
740 }
741
742 unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
743 TTI::CastContextHint CCH,
744 TTI::TargetCostKind CostKind,
745 const Instruction *I = nullptr) {
746 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
747 return 0;
748
749 const TargetLoweringBase *TLI = getTLI();
750 int ISD = TLI->InstructionOpcodeToISD(Opcode);
751 assert(ISD && "Invalid opcode")((ISD && "Invalid opcode") ? static_cast<void> (
0) : __assert_fail ("ISD && \"Invalid opcode\"", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 751, __PRETTY_FUNCTION__))
;
752 std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
753 std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);
754
755 TypeSize SrcSize = SrcLT.second.getSizeInBits();
756 TypeSize DstSize = DstLT.second.getSizeInBits();
757 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
758 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
759
760 switch (Opcode) {
761 default:
762 break;
763 case Instruction::Trunc:
764 // Check for NOOP conversions.
765 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
766 return 0;
767 LLVM_FALLTHROUGH[[gnu::fallthrough]];
768 case Instruction::BitCast:
769       // Bitcasts between types that are legalized to the same type are free, and
770       // we assume an int to/from ptr cast of the same size is also free.
771 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
772 SrcSize == DstSize)
773 return 0;
774 break;
775 case Instruction::FPExt:
776 if (I && getTLI()->isExtFree(I))
777 return 0;
778 break;
779 case Instruction::ZExt:
780 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
781 return 0;
782 LLVM_FALLTHROUGH[[gnu::fallthrough]];
783 case Instruction::SExt:
784 if (I && getTLI()->isExtFree(I))
785 return 0;
786
787 // If this is a zext/sext of a load, return 0 if the corresponding
788 // extending load exists on target.
789 if (CCH == TTI::CastContextHint::Normal) {
790 EVT ExtVT = EVT::getEVT(Dst);
791 EVT LoadVT = EVT::getEVT(Src);
792 unsigned LType =
793 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
794 if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
795 return 0;
796 }
797 break;
798 case Instruction::AddrSpaceCast:
799 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
800 Dst->getPointerAddressSpace()))
801 return 0;
802 break;
803 }
804
805 auto *SrcVTy = dyn_cast<VectorType>(Src);
806 auto *DstVTy = dyn_cast<VectorType>(Dst);
807
808 // If the cast is marked as legal (or promote) then assume low cost.
809 if (SrcLT.first == DstLT.first &&
810 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
811 return SrcLT.first;
812
813 // Handle scalar conversions.
814 if (!SrcVTy && !DstVTy) {
815 // Just check the op cost. If the operation is legal then assume it costs
816 // 1.
817 if (!TLI->isOperationExpand(ISD, DstLT.second))
818 return 1;
819
820       // Assume that illegal scalar instructions are expensive.
821 return 4;
822 }
823
824 // Check vector-to-vector casts.
825 if (DstVTy && SrcVTy) {
826 // If the cast is between same-sized registers, then the check is simple.
827 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
828
829 // Assume that Zext is done using AND.
830 if (Opcode == Instruction::ZExt)
831 return SrcLT.first;
832
833 // Assume that sext is done using SHL and SRA.
834 if (Opcode == Instruction::SExt)
835 return SrcLT.first * 2;
836
837       // Just check the op cost. If the operation is legal then assume it costs
838       // 1 and multiply by the type-legalization overhead.
839
840 if (!TLI->isOperationExpand(ISD, DstLT.second))
841 return SrcLT.first * 1;
842 }
843
844 // If we are legalizing by splitting, query the concrete TTI for the cost
845 // of casting the original vector twice. We also need to factor in the
846 // cost of the split itself. Count that as 1, to be consistent with
847 // TLI->getTypeLegalizationCost().
848 bool SplitSrc =
849 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
850 TargetLowering::TypeSplitVector;
851 bool SplitDst =
852 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
853 TargetLowering::TypeSplitVector;
854 if ((SplitSrc || SplitDst) &&
855 cast<FixedVectorType>(SrcVTy)->getNumElements() > 1 &&
856 cast<FixedVectorType>(DstVTy)->getNumElements() > 1) {
857 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
858 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
859 T *TTI = static_cast<T *>(this);
860 // If both types need to be split then the split is free.
861 unsigned SplitCost =
862 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
863 return SplitCost +
864 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
865 CostKind, I));
866 }
867
868 // In other cases where the source or destination are illegal, assume
869 // the operation will get scalarized.
870 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
871 unsigned Cost = thisT()->getCastInstrCost(
872 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
873
874       // Return the cost of multiple scalar invocations plus the cost of
875 // inserting and extracting the values.
876 return getScalarizationOverhead(DstVTy, true, true) + Num * Cost;
877 }
878
879     // We already handled vector-to-vector and scalar-to-scalar conversions.
880     // This is where we handle bitcasts between vectors and scalars. We need
881     // to assume that the conversion is scalarized in one way or another.
882
883 if (Opcode == Instruction::BitCast) {
884 // Illegal bitcasts are done by storing and loading from a stack slot.
885 return (SrcVTy ? getScalarizationOverhead(SrcVTy, false, true) : 0) +
886 (DstVTy ? getScalarizationOverhead(DstVTy, true, false) : 0);
887 }
888
889 llvm_unreachable("Unhandled cast")::llvm::llvm_unreachable_internal("Unhandled cast", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 889)
;
890 }
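
The most involved path above is the split case: when either side of a vector cast is legalized by splitting, the cost is the cost of casting the two halves plus one split, and the split itself is free when both sides split. A sketch of that recurrence, assuming a hypothetical getVectorSplitCost() of 1:

    // Sketch only: CastHalfCost is whatever the concrete TTI returns for the
    // half-width cast; the split cost of 1 is an assumption.
    unsigned splitVectorCastCost(bool SplitSrc, bool SplitDst,
                                 unsigned CastHalfCost) {
      unsigned SplitCost = (!SplitSrc || !SplitDst) ? 1 : 0; // both split => free
      return SplitCost + 2 * CastHalfCost;                   // cast each half
    }
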
891
892 unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
893 VectorType *VecTy, unsigned Index) {
894 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
895 Index) +
896 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
897 TTI::CastContextHint::None, TTI::TCK_RecipThroughput);
898 }
899
900 unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
901 return BaseT::getCFInstrCost(Opcode, CostKind);
902 }
903
904 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
905 CmpInst::Predicate VecPred,
906 TTI::TargetCostKind CostKind,
907 const Instruction *I = nullptr) {
908 const TargetLoweringBase *TLI = getTLI();
909 int ISD = TLI->InstructionOpcodeToISD(Opcode);
910 assert(ISD && "Invalid opcode")((ISD && "Invalid opcode") ? static_cast<void> (
0) : __assert_fail ("ISD && \"Invalid opcode\"", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 910, __PRETTY_FUNCTION__))
;
911
912 // TODO: Handle other cost kinds.
913 if (CostKind != TTI::TCK_RecipThroughput)
914 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
915 I);
916
917 // Selects on vectors are actually vector selects.
918 if (ISD == ISD::SELECT) {
919 assert(CondTy && "CondTy must exist")((CondTy && "CondTy must exist") ? static_cast<void
> (0) : __assert_fail ("CondTy && \"CondTy must exist\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 919, __PRETTY_FUNCTION__))
;
920 if (CondTy->isVectorTy())
921 ISD = ISD::VSELECT;
922 }
923 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
924
925 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
926 !TLI->isOperationExpand(ISD, LT.second)) {
927 // The operation is legal. Assume it costs 1. Multiply
928 // by the type-legalization overhead.
929 return LT.first * 1;
930 }
931
932 // Otherwise, assume that the cast is scalarized.
933 // TODO: If one of the types get legalized by splitting, handle this
934 // similarly to what getCastInstrCost() does.
935 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
936 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
937 if (CondTy)
938 CondTy = CondTy->getScalarType();
939 unsigned Cost = thisT()->getCmpSelInstrCost(
940 Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
941
942       // Return the cost of multiple scalar invocations plus the cost of
943 // inserting and extracting the values.
944 return getScalarizationOverhead(ValVTy, true, false) + Num * Cost;
945 }
946
947 // Unknown scalar opcode.
948 return 1;
949 }
950
951 unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
952 std::pair<unsigned, MVT> LT =
953 getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
954
955 return LT.first;
956 }
957
958 unsigned getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
959 unsigned AddressSpace,
960 TTI::TargetCostKind CostKind,
961 const Instruction *I = nullptr) {
962 assert(!Src->isVoidTy() && "Invalid type")((!Src->isVoidTy() && "Invalid type") ? static_cast
<void> (0) : __assert_fail ("!Src->isVoidTy() && \"Invalid type\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 962, __PRETTY_FUNCTION__))
;
963 // Assume types, such as structs, are expensive.
964 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
965 return 4;
966 std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);
967
968 // Assuming that all loads of legal types cost 1.
969 unsigned Cost = LT.first;
970 if (CostKind != TTI::TCK_RecipThroughput)
971 return Cost;
972
973 if (Src->isVectorTy() &&
974 // In practice it's not currently possible to have a change in lane
975 // length for extending loads or truncating stores so both types should
976 // have the same scalable property.
977 TypeSize::isKnownLT(Src->getPrimitiveSizeInBits(),
978 LT.second.getSizeInBits())) {
979 // This is a vector load that legalizes to a larger type than the vector
980 // itself. Unless the corresponding extending load or truncating store is
981 // legal, then this will scalarize.
982 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
983 EVT MemVT = getTLI()->getValueType(DL, Src);
984 if (Opcode == Instruction::Store)
985 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
986 else
987 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
988
989 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
990 // This is a vector load/store for some illegal type that is scalarized.
991 // We must account for the cost of building or decomposing the vector.
992 Cost += getScalarizationOverhead(cast<VectorType>(Src),
993 Opcode != Instruction::Store,
994 Opcode == Instruction::Store);
995 }
996 }
997
998 return Cost;
999 }
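
To summarize the memory cost above: a load or store of a legal type costs LT.first, and when a vector type legalizes to a wider register without a legal (or custom) extending load / truncating store, the scalarization overhead of building or decomposing the vector is added on top. A compact sketch of that decision, leaving out the cost-kind and struct special cases:

    // Sketch only: WidensToLargerType corresponds to the TypeSize comparison,
    // ExtTruncLegalOrCustom to the getLoadExtAction/getTruncStoreAction check.
    unsigned memoryOpCost(unsigned NumLegalParts, bool IsVector,
                          bool WidensToLargerType, bool ExtTruncLegalOrCustom,
                          unsigned ScalarizationOverhead) {
      unsigned Cost = NumLegalParts;              // one operation per legal part
      if (IsVector && WidensToLargerType && !ExtTruncLegalOrCustom)
        Cost += ScalarizationOverhead;            // build/decompose the vector
      return Cost;
    }
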
1000
1001 unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1002 const Value *Ptr, bool VariableMask,
1003 Align Alignment, TTI::TargetCostKind CostKind,
1004 const Instruction *I = nullptr) {
1005 auto *VT = cast<FixedVectorType>(DataTy);
1006 // Assume the target does not have support for gather/scatter operations
1007 // and provide a rough estimate.
1008 //
1009 // First, compute the cost of extracting the individual addresses and the
1010 // individual memory operations.
1011 int LoadCost =
1012 VT->getNumElements() *
1013 (getVectorInstrCost(
1014 Instruction::ExtractElement,
1015 FixedVectorType::get(PointerType::get(VT->getElementType(), 0),
1016 VT->getNumElements()),
1017 -1) +
1018 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
1019
1020 // Next, compute the cost of packing the result in a vector.
1021 int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store,
1022 Opcode == Instruction::Store);
1023
1024 int ConditionalCost = 0;
1025 if (VariableMask) {
1026 // Compute the cost of conditionally executing the memory operations with
1027      // variable masks. This includes extracting the individual conditions,
1028      // branches, and PHIs to combine the results.
1029 // NOTE: Estimating the cost of conditionally executing the memory
1030 // operations accurately is quite difficult and the current solution
1031 // provides a very rough estimate only.
1032 ConditionalCost =
1033 VT->getNumElements() *
1034 (getVectorInstrCost(
1035 Instruction::ExtractElement,
1036 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
1037 VT->getNumElements()),
1038 -1) +
1039 getCFInstrCost(Instruction::Br, CostKind) +
1040 getCFInstrCost(Instruction::PHI, CostKind));
1041 }
1042
1043 return LoadCost + PackingCost + ConditionalCost;
1044 }
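
The gather/scatter estimate above is purely additive, so a worked example shows its shape. Assuming a 4-element gather where every per-element cost (address extract, scalar memory op, result insert, condition extract, branch, PHI) is 1:

    // Sketch only: all unit costs are assumptions; the real code queries the
    // vector-instruction, memory-op and CF-instruction cost hooks instead.
    unsigned gatherScatterCost(unsigned NumElts, bool VariableMask) {
      unsigned LoadCost = NumElts * (1 /*extract addr*/ + 1 /*scalar mem op*/); // 4*2 = 8
      unsigned PackingCost = NumElts * 1;                    // insert each result: 4
      unsigned ConditionalCost =
          VariableMask ? NumElts * (1 /*extract cond*/ + 1 /*br*/ + 1 /*phi*/) : 0; // 12
      return LoadCost + PackingCost + ConditionalCost;       // 8 + 4 + 12 = 24 if masked
    }
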
1045
1046 unsigned getInterleavedMemoryOpCost(
1047 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1048 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1049 bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1050 auto *VT = cast<FixedVectorType>(VecTy);
11
'VecTy' is a 'FixedVectorType'
1051
1052 unsigned NumElts = VT->getNumElements();
1053 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor")((Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor"
) ? static_cast<void> (0) : __assert_fail ("Factor > 1 && NumElts % Factor == 0 && \"Invalid interleave factor\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 1053, __PRETTY_FUNCTION__))
;
12
Assuming 'Factor' is > 1
13
Assuming the condition is true
14
'?' condition is true
1054
1055 unsigned NumSubElts = NumElts / Factor;
1056 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1057
1058 // Firstly, the cost of load/store operation.
1059 unsigned Cost;
1060     if (UseMaskForCond || UseMaskForGaps)
14.1
'UseMaskForCond' is false
14.2
'UseMaskForGaps' is false
15
Taking false branch
1061 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1062 AddressSpace, CostKind);
1063 else
1064 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1065 CostKind);
1066
1067 // Legalize the vector type, and get the legalized and unlegalized type
1068 // sizes.
1069 MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
1070 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1071 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1072
1073 // Return the ceiling of dividing A by B.
1074 auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
1075
1076 // Scale the cost of the memory operation by the fraction of legalized
1077 // instructions that will actually be used. We shouldn't account for the
1078 // cost of dead instructions since they will be removed.
1079 //
1080 // E.g., An interleaved load of factor 8:
1081 // %vec = load <16 x i64>, <16 x i64>* %ptr
1082 // %v0 = shufflevector %vec, undef, <0, 8>
1083 //
1084 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1085 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1086 // type). The other loads are unused.
1087 //
1088 // We only scale the cost of loads since interleaved store groups aren't
1089 // allowed to have gaps.
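    // Working the factor-8 example through the arithmetic below: <16 x i64> has a
    // store size of 128 bytes and a legal v2i64 part has 16 bytes, so
    // NumLegalInsts = ceil(128, 16) = 8 and NumEltsPerLegalInst = ceil(16, 8) = 2.
    // The <0, 8> shuffle needs elements 0 and 8, which live in legal loads 0 and 4,
    // so only 2 of the 8 legal loads end up marked as used.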
1090 if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
16
Assuming 'Opcode' is not equal to Load
1091 // The number of loads of a legal type it will take to represent a load
1092 // of the unlegalized vector type.
1093 unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);
1094
1095 // The number of elements of the unlegalized type that correspond to a
1096 // single legal instruction.
1097 unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);
1098
1099 // Determine which legal instructions will be used.
1100 BitVector UsedInsts(NumLegalInsts, false);
1101 for (unsigned Index : Indices)