Bug Summary

File: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 3111, column 20
Called C++ object pointer is null
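
This checker fires when a member function is called through a pointer that can be null on some path. Below is a minimal, hypothetical sketch of the reported pattern — not taken from this file; the type and function names are illustrative only:

    struct CostEntry {
      int Cost;
      int get() const { return Cost; }
    };

    int lookupCost(const CostEntry *Entry) {
      // If Entry may be null on a path reaching this call, the analyzer
      // reports "Called C++ object pointer is null" at the call site.
      return Entry->get();
    }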

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86TargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-17-195756-12974-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

1//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// X86 target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15/// A note about the Cost Model numbers used below: they correspond to a
16/// "generic" X86 CPU rather than to a concrete CPU model. Usually the numbers
17/// correspond to the CPU where the feature first appeared. For example, if we
18/// check Subtarget.hasSSE42() in the lookups below, the cost is based on
19/// Nehalem, as that was the first CPU to support that feature level and thus
20/// most likely has the worst-case cost.
21/// Some examples of other technologies/CPUs:
22/// SSE 3 - Pentium4 / Athlon64
23/// SSE 4.1 - Penryn
24/// SSE 4.2 - Nehalem
25/// AVX - Sandy Bridge
26/// AVX2 - Haswell
27/// AVX-512 - Xeon Phi / Skylake
28/// And some examples of instruction target dependent costs (latency)
29///                     divss    sqrtss   rsqrtss
30///   AMD K7            11-16    19       3
31///   Piledriver        9-24     13-15    5
32///   Jaguar            14       16       2
33///   Pentium II,III    18       30       2
34///   Nehalem           7-14     7-18     3
35///   Haswell           10-13    11       5
36/// TODO: Develop and implement the target dependent cost model and
37/// specialize cost numbers for different Cost Model Targets such as throughput,
38/// code size, latency and uop count.
39//===----------------------------------------------------------------------===//
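// Sketch (not part of the original file): a condensed view of the cost-table
// pattern used throughout the functions below. Each table is a static array of
// CostTblEntry keyed by (ISD opcode, legalized MVT) and is consulted only when
// the subtarget feature that motivated the numbers is available. Note that
// CostTableLookup returns nullptr when no entry matches, which is why every
// lookup is wrapped in an `if (const auto *Entry = ...)` guard.
//
//   static const CostTblEntry ExampleCostTable[] = {
//     { ISD::FDIV, MVT::f32, 14 }, // cost on the first CPU with the feature
//   };
//   if (ST->hasAVX())
//     if (const auto *Entry = CostTableLookup(ExampleCostTable, ISD, LT.second))
//       return LT.first * Entry->Cost;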
40
41#include "X86TargetTransformInfo.h"
42#include "llvm/Analysis/TargetTransformInfo.h"
43#include "llvm/CodeGen/BasicTTIImpl.h"
44#include "llvm/CodeGen/CostTable.h"
45#include "llvm/CodeGen/TargetLowering.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/Support/Debug.h"
48
49using namespace llvm;
50
51#define DEBUG_TYPE "x86tti"
52
53//===----------------------------------------------------------------------===//
54//
55// X86 cost model.
56//
57//===----------------------------------------------------------------------===//
58
59TargetTransformInfo::PopcntSupportKind
60X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62 // TODO: Currently the __builtin_popcount() implementation using SSE3
63 // instructions is inefficient. Once the problem is fixed, we should
64 // call ST->hasSSE3() instead of ST->hasPOPCNT().
65 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66}
67
68llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69 TargetTransformInfo::CacheLevel Level) const {
70 switch (Level) {
71 case TargetTransformInfo::CacheLevel::L1D:
72 // - Penryn
73 // - Nehalem
74 // - Westmere
75 // - Sandy Bridge
76 // - Ivy Bridge
77 // - Haswell
78 // - Broadwell
79 // - Skylake
80 // - Kabylake
81 return 32 * 1024; // 32 KByte
82 case TargetTransformInfo::CacheLevel::L2D:
83 // - Penryn
84 // - Nehalem
85 // - Westmere
86 // - Sandy Bridge
87 // - Ivy Bridge
88 // - Haswell
89 // - Broadwell
90 // - Skylake
91 // - Kabylake
92 return 256 * 1024; // 256 KByte
93 }
94
95 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96}
97
98llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99 TargetTransformInfo::CacheLevel Level) const {
100 // - Penryn
101 // - Nehalem
102 // - Westmere
103 // - Sandy Bridge
104 // - Ivy Bridge
105 // - Haswell
106 // - Broadwell
107 // - Skylake
108 // - Kabylake
109 switch (Level) {
110 case TargetTransformInfo::CacheLevel::L1D:
111 LLVM_FALLTHROUGH;
112 case TargetTransformInfo::CacheLevel::L2D:
113 return 8;
114 }
115
116 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117}
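// Minimal usage sketch (not part of the original file), assuming the public
// TargetTransformInfo wrapper forwards getCacheSize and getCacheAssociativity
// to this implementation; shows how the values above would be consumed.
//
//   #include "llvm/Analysis/TargetTransformInfo.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   void dumpL1Info(const llvm::TargetTransformInfo &TTI) {
//     using CL = llvm::TargetTransformInfo::CacheLevel;
//     if (llvm::Optional<unsigned> Size = TTI.getCacheSize(CL::L1D))
//       llvm::errs() << "L1D size: " << *Size << " bytes\n";         // 32 KiB here
//     if (llvm::Optional<unsigned> Ways = TTI.getCacheAssociativity(CL::L1D))
//       llvm::errs() << "L1D associativity: " << *Ways << "-way\n";  // 8-way here
//   }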
118
119unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120 bool Vector = (ClassID == 1);
121 if (Vector && !ST->hasSSE1())
122 return 0;
123
124 if (ST->is64Bit()) {
125 if (Vector && ST->hasAVX512())
126 return 32;
127 return 16;
128 }
129 return 8;
130}
131
132unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
133 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
134 if (Vector) {
135 if (ST->hasAVX512() && PreferVectorWidth >= 512)
136 return 512;
137 if (ST->hasAVX() && PreferVectorWidth >= 256)
138 return 256;
139 if (ST->hasSSE1() && PreferVectorWidth >= 128)
140 return 128;
141 return 0;
142 }
143
144 if (ST->is64Bit())
145 return 64;
146
147 return 32;
148}
149
150unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
151 return getRegisterBitWidth(true);
152}
153
154unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
155 // If the loop will not be vectorized, don't interleave the loop.
156 // Let the regular unroller unroll the loop instead, which saves the
157 // overflow check and memory check cost.
158 if (VF == 1)
159 return 1;
160
161 if (ST->isAtom())
162 return 1;
163
164 // Sandybridge and Haswell have multiple execution ports and pipelined
165 // vector units.
166 if (ST->hasAVX())
167 return 4;
168
169 return 2;
170}
171
172int X86TTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
173 TTI::TargetCostKind CostKind,
174 TTI::OperandValueKind Op1Info,
175 TTI::OperandValueKind Op2Info,
176 TTI::OperandValueProperties Opd1PropInfo,
177 TTI::OperandValueProperties Opd2PropInfo,
178 ArrayRef<const Value *> Args,
179 const Instruction *CxtI) {
180 // TODO: Handle more cost kinds.
181 if (CostKind != TTI::TCK_RecipThroughput)
182 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
183 Op2Info, Opd1PropInfo,
184 Opd2PropInfo, Args, CxtI);
185 // Legalize the type.
186 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
187
188 int ISD = TLI->InstructionOpcodeToISD(Opcode);
189 assert(ISD && "Invalid opcode");
190
191 static const CostTblEntry GLMCostTable[] = {
192 { ISD::FDIV, MVT::f32, 18 }, // divss
193 { ISD::FDIV, MVT::v4f32, 35 }, // divps
194 { ISD::FDIV, MVT::f64, 33 }, // divsd
195 { ISD::FDIV, MVT::v2f64, 65 }, // divpd
196 };
197
198 if (ST->useGLMDivSqrtCosts())
199 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
200 LT.second))
201 return LT.first * Entry->Cost;
202
203 static const CostTblEntry SLMCostTable[] = {
204 { ISD::MUL, MVT::v4i32, 11 }, // pmulld
205 { ISD::MUL, MVT::v8i16, 2 }, // pmullw
206 { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
207 { ISD::FMUL, MVT::f64, 2 }, // mulsd
208 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
209 { ISD::FMUL, MVT::v4f32, 2 }, // mulps
210 { ISD::FDIV, MVT::f32, 17 }, // divss
211 { ISD::FDIV, MVT::v4f32, 39 }, // divps
212 { ISD::FDIV, MVT::f64, 32 }, // divsd
213 { ISD::FDIV, MVT::v2f64, 69 }, // divpd
214 { ISD::FADD, MVT::v2f64, 2 }, // addpd
215 { ISD::FSUB, MVT::v2f64, 2 }, // subpd
216 // v2i64/v4i64 mul is custom lowered as a series of long
217 // multiplies(3), shifts(3) and adds(2).
218 // SLM muldq throughput is 2 and addq throughput is 4,
219 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
220 // 2X4 (addq throughput) = 17
221 { ISD::MUL, MVT::v2i64, 17 },
222 // slm addq\subq throughput is 4
223 { ISD::ADD, MVT::v2i64, 4 },
224 { ISD::SUB, MVT::v2i64, 4 },
225 };
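// Worked check (not part of the original file) of the v2i64 MUL entry above,
// using the throughputs stated in the comment: 3 multiplies at throughput 2,
// 3 shifts at 1, and 2 adds at 4.
//
//   constexpr int SLMMulV2I64 = 3 * 2 + 3 * 1 + 2 * 4;
//   static_assert(SLMMulV2I64 == 17, "matches the SLMCostTable entry");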
226
227 if (ST->isSLM()) {
228 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
229 // Check if the operands can be shrunk into a smaller datatype.
230 bool Op1Signed = false;
231 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
232 bool Op2Signed = false;
233 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
234
235 bool signedMode = Op1Signed | Op2Signed;
236 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
237
238 if (OpMinSize <= 7)
239 return LT.first * 3; // pmullw/sext
240 if (!signedMode && OpMinSize <= 8)
241 return LT.first * 3; // pmullw/zext
242 if (OpMinSize <= 15)
243 return LT.first * 5; // pmullw/pmulhw/pshuf
244 if (!signedMode && OpMinSize <= 16)
245 return LT.first * 5; // pmullw/pmulhw/pshuf
246 }
247
248 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
249 LT.second)) {
250 return LT.first * Entry->Cost;
251 }
252 }
253
254 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
255 ISD == ISD::UREM) &&
256 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
257 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
258 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
259 if (ISD == ISD::SDIV || ISD == ISD::SREM) {
260 // On X86, vector signed division by a power-of-two constant is
261 // normally expanded to the sequence SRA + SRL + ADD + SRA.
262 // The OperandValue properties may not be the same as that of the previous
263 // operation; conservatively assume OP_None.
264 int Cost =
265 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
266 Op2Info,
267 TargetTransformInfo::OP_None,
268 TargetTransformInfo::OP_None);
269 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
270 Op2Info,
271 TargetTransformInfo::OP_None,
272 TargetTransformInfo::OP_None);
273 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
274 Op2Info,
275 TargetTransformInfo::OP_None,
276 TargetTransformInfo::OP_None);
277
278 if (ISD == ISD::SREM) {
279 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
280 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
281 Op2Info);
282 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
283 Op2Info);
284 }
285
286 return Cost;
287 }
288
289 // Vector unsigned division/remainder will be simplified to shifts/masks.
290 if (ISD == ISD::UDIV)
291 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
292 Op1Info, Op2Info,
293 TargetTransformInfo::OP_None,
294 TargetTransformInfo::OP_None);
295
296 else // UREM
297 return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
298 Op1Info, Op2Info,
299 TargetTransformInfo::OP_None,
300 TargetTransformInfo::OP_None);
301 }
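// Quick sanity check (illustrative only, not part of the original file) of the
// SREM decomposition used above: for C++'s truncating integer division,
// X % C == X - (X / C) * C, which is why SREM adds a Mul and a Sub on top of
// the SDIV shift sequence.
//
//   static_assert(-7 % 4 == -7 - (-7 / 4) * 4, "srem identity");
//   static_assert(13 % 8 == 13 - (13 / 8) * 8, "srem identity");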
302
303 static const CostTblEntry AVX512BWUniformConstCostTable[] = {
304 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
305 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
306 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
307 };
308
309 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
310 ST->hasBWI()) {
311 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
312 LT.second))
313 return LT.first * Entry->Cost;
314 }
315
316 static const CostTblEntry AVX512UniformConstCostTable[] = {
317 { ISD::SRA, MVT::v2i64, 1 },
318 { ISD::SRA, MVT::v4i64, 1 },
319 { ISD::SRA, MVT::v8i64, 1 },
320
321 { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand.
322 { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand.
323 { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb.
324
325 { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
326 { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
327 { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
328 { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
329 };
330
331 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
332 ST->hasAVX512()) {
333 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
334 LT.second))
335 return LT.first * Entry->Cost;
336 }
337
338 static const CostTblEntry AVX2UniformConstCostTable[] = {
339 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
340 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
341 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.
342
343 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
344
345 { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
346 { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
347 { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
348 { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
349 };
350
351 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
352 ST->hasAVX2()) {
353 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
354 LT.second))
355 return LT.first * Entry->Cost;
356 }
357
358 static const CostTblEntry SSE2UniformConstCostTable[] = {
359 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
360 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
361 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
362
363 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
364 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
365 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
366
367 { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
368 { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
369 { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence
370 { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence
371 { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
372 { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
373 { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence
374 { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence
375 };
376
377 // XOP has faster vXi8 shifts.
378 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
379 ST->hasSSE2() && !ST->hasXOP()) {
380 if (const auto *Entry =
381 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
382 return LT.first * Entry->Cost;
383 }
384
385 static const CostTblEntry AVX512BWConstCostTable[] = {
386 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
387 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
388 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
389 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
390 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
391 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence
392 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
393 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence
394 };
395
396 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
397 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
398 ST->hasBWI()) {
399 if (const auto *Entry =
400 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
401 return LT.first * Entry->Cost;
402 }
403
404 static const CostTblEntry AVX512ConstCostTable[] = {
405 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
406 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
407 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
408 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
409 { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
410 { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
411 { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
412 { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
413 { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
414 { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
415 { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
416 { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
417 };
418
419 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
420 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
421 ST->hasAVX512()) {
422 if (const auto *Entry =
423 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
424 return LT.first * Entry->Cost;
425 }
426
427 static const CostTblEntry AVX2ConstCostTable[] = {
428 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
429 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
430 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
431 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
432 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
433 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence
434 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
435 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence
436 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
437 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence
438 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
439 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence
440 };
441
442 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
443 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
444 ST->hasAVX2()) {
445 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
446 return LT.first * Entry->Cost;
447 }
448
449 static const CostTblEntry SSE2ConstCostTable[] = {
450 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
451 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
452 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
453 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
454 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
455 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
456 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
457 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
458 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
459 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
460 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
461 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence
462 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
463 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
464 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
465 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence
466 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split.
467 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split.
468 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
469 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence
470 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split.
471 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split.
472 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
473 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence
474 };
475
476 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
477 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
478 ST->hasSSE2()) {
479 // pmuldq sequence.
480 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
481 return LT.first * 32;
482 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
483 return LT.first * 38;
484 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
485 return LT.first * 15;
486 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
487 return LT.first * 20;
488
489 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
490 return LT.first * Entry->Cost;
491 }
492
493 static const CostTblEntry AVX512BWShiftCostTable[] = {
494 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
495 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
496 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw
497
498 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
499 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
500 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw
501
502 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
503 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
504 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw
505 };
506
507 if (ST->hasBWI())
508 if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
509 return LT.first * Entry->Cost;
510
511 static const CostTblEntry AVX2UniformCostTable[] = {
512 // Uniform splats are cheaper for the following instructions.
513 { ISD::SHL, MVT::v16i16, 1 }, // psllw.
514 { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
515 { ISD::SRA, MVT::v16i16, 1 }, // psraw.
516 { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
517 { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
518 { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.
519 };
520
521 if (ST->hasAVX2() &&
522 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
523 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
524 if (const auto *Entry =
525 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
526 return LT.first * Entry->Cost;
527 }
528
529 static const CostTblEntry SSE2UniformCostTable[] = {
530 // Uniform splats are cheaper for the following instructions.
531 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
532 { ISD::SHL, MVT::v4i32, 1 }, // pslld
533 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
534
535 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
536 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
537 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
538
539 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
540 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
541 };
542
543 if (ST->hasSSE2() &&
544 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
545 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
546 if (const auto *Entry =
547 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
548 return LT.first * Entry->Cost;
549 }
550
551 static const CostTblEntry AVX512DQCostTable[] = {
552 { ISD::MUL, MVT::v2i64, 1 },
553 { ISD::MUL, MVT::v4i64, 1 },
554 { ISD::MUL, MVT::v8i64, 1 }
555 };
556
557 // Look for AVX512DQ lowering tricks for custom cases.
558 if (ST->hasDQI())
559 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
560 return LT.first * Entry->Cost;
561
562 static const CostTblEntry AVX512BWCostTable[] = {
563 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
564 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
565 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
566
567 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence.
568 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence.
569 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence.
570 };
571
572 // Look for AVX512BW lowering tricks for custom cases.
573 if (ST->hasBWI())
574 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
575 return LT.first * Entry->Cost;
576
577 static const CostTblEntry AVX512CostTable[] = {
578 { ISD::SHL, MVT::v16i32, 1 },
579 { ISD::SRL, MVT::v16i32, 1 },
580 { ISD::SRA, MVT::v16i32, 1 },
581
582 { ISD::SHL, MVT::v8i64, 1 },
583 { ISD::SRL, MVT::v8i64, 1 },
584
585 { ISD::SRA, MVT::v2i64, 1 },
586 { ISD::SRA, MVT::v4i64, 1 },
587 { ISD::SRA, MVT::v8i64, 1 },
588
589 { ISD::MUL, MVT::v64i8, 26 }, // extend/pmullw/trunc sequence.
590 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence.
591 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence.
592 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org)
593 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org)
594 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org)
595 { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add
596
597 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
598 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
599 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
600
601 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
602 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
603 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
604 };
605
606 if (ST->hasAVX512())
607 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
608 return LT.first * Entry->Cost;
609
610 static const CostTblEntry AVX2ShiftCostTable[] = {
611 // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
612 // custom so we can detect the cases where the shift amount is a scalar.
613 { ISD::SHL, MVT::v4i32, 1 },
614 { ISD::SRL, MVT::v4i32, 1 },
615 { ISD::SRA, MVT::v4i32, 1 },
616 { ISD::SHL, MVT::v8i32, 1 },
617 { ISD::SRL, MVT::v8i32, 1 },
618 { ISD::SRA, MVT::v8i32, 1 },
619 { ISD::SHL, MVT::v2i64, 1 },
620 { ISD::SRL, MVT::v2i64, 1 },
621 { ISD::SHL, MVT::v4i64, 1 },
622 { ISD::SRL, MVT::v4i64, 1 },
623 };
624
625 if (ST->hasAVX512()) {
626 if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
627 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
628 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
629 // On AVX512, a packed v32i16 shift left by a constant build_vector
630 // is lowered into a vector multiply (vpmullw).
631 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
632 Op1Info, Op2Info,
633 TargetTransformInfo::OP_None,
634 TargetTransformInfo::OP_None);
635 }
636
637 // Look for AVX2 lowering tricks.
638 if (ST->hasAVX2()) {
639 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
640 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
641 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
642 // On AVX2, a packed v16i16 shift left by a constant build_vector
643 // is lowered into a vector multiply (vpmullw).
644 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
645 Op1Info, Op2Info,
646 TargetTransformInfo::OP_None,
647 TargetTransformInfo::OP_None);
648
649 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
650 return LT.first * Entry->Cost;
651 }
652
653 static const CostTblEntry XOPShiftCostTable[] = {
654 // 128bit shifts take 1cy, but right shifts require negation beforehand.
655 { ISD::SHL, MVT::v16i8, 1 },
656 { ISD::SRL, MVT::v16i8, 2 },
657 { ISD::SRA, MVT::v16i8, 2 },
658 { ISD::SHL, MVT::v8i16, 1 },
659 { ISD::SRL, MVT::v8i16, 2 },
660 { ISD::SRA, MVT::v8i16, 2 },
661 { ISD::SHL, MVT::v4i32, 1 },
662 { ISD::SRL, MVT::v4i32, 2 },
663 { ISD::SRA, MVT::v4i32, 2 },
664 { ISD::SHL, MVT::v2i64, 1 },
665 { ISD::SRL, MVT::v2i64, 2 },
666 { ISD::SRA, MVT::v2i64, 2 },
667 // 256bit shifts require splitting if AVX2 didn't catch them above.
668 { ISD::SHL, MVT::v32i8, 2+2 },
669 { ISD::SRL, MVT::v32i8, 4+2 },
670 { ISD::SRA, MVT::v32i8, 4+2 },
671 { ISD::SHL, MVT::v16i16, 2+2 },
672 { ISD::SRL, MVT::v16i16, 4+2 },
673 { ISD::SRA, MVT::v16i16, 4+2 },
674 { ISD::SHL, MVT::v8i32, 2+2 },
675 { ISD::SRL, MVT::v8i32, 4+2 },
676 { ISD::SRA, MVT::v8i32, 4+2 },
677 { ISD::SHL, MVT::v4i64, 2+2 },
678 { ISD::SRL, MVT::v4i64, 4+2 },
679 { ISD::SRA, MVT::v4i64, 4+2 },
680 };
681
682 // Look for XOP lowering tricks.
683 if (ST->hasXOP()) {
684 // If the right shift is constant then we'll fold the negation so
685 // it's as cheap as a left shift.
686 int ShiftISD = ISD;
687 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
688 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
689 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
690 ShiftISD = ISD::SHL;
691 if (const auto *Entry =
692 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
693 return LT.first * Entry->Cost;
694 }
695
696 static const CostTblEntry SSE2UniformShiftCostTable[] = {
697 // Uniform splats are cheaper for the following instructions.
698 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
699 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split.
700 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split.
701
702 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
703 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split.
704 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split.
705
706 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
707 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split.
708 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle.
709 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split.
710 };
711
712 if (ST->hasSSE2() &&
713 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
714 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
715
716 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
717 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
718 return LT.first * 4; // 2*psrad + shuffle.
719
720 if (const auto *Entry =
721 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
722 return LT.first * Entry->Cost;
723 }
724
725 if (ISD == ISD::SHL &&
726 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
727 MVT VT = LT.second;
728 // A vector shift left by a non-uniform constant can be lowered
729 // into a vector multiply.
730 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
731 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
732 ISD = ISD::MUL;
733 }
734
735 static const CostTblEntry AVX2CostTable[] = {
736 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
737 { ISD::SHL, MVT::v64i8, 22 }, // 2*vpblendvb sequence.
738 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
739 { ISD::SHL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.
740
741 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
742 { ISD::SRL, MVT::v64i8, 22 }, // 2*vpblendvb sequence.
743 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
744 { ISD::SRL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.
745
746 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
747 { ISD::SRA, MVT::v64i8, 48 }, // 2*vpblendvb sequence.
748 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
749 { ISD::SRA, MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence.
750 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
751 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence.
752
753 { ISD::SUB, MVT::v32i8, 1 }, // psubb
754 { ISD::ADD, MVT::v32i8, 1 }, // paddb
755 { ISD::SUB, MVT::v16i16, 1 }, // psubw
756 { ISD::ADD, MVT::v16i16, 1 }, // paddw
757 { ISD::SUB, MVT::v8i32, 1 }, // psubd
758 { ISD::ADD, MVT::v8i32, 1 }, // paddd
759 { ISD::SUB, MVT::v4i64, 1 }, // psubq
760 { ISD::ADD, MVT::v4i64, 1 }, // paddq
761
762 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence.
763 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence.
764 { ISD::MUL, MVT::v16i16, 1 }, // pmullw
765 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org)
766 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add
767
768 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
769 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
770 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
771 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
772 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
773 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
774
775 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
776 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
777 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
778 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
779 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
780 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
781 };
782
783 // Look for AVX2 lowering tricks for custom cases.
784 if (ST->hasAVX2())
785 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
786 return LT.first * Entry->Cost;
787
788 static const CostTblEntry AVX1CostTable[] = {
789 // We don't have to scalarize unsupported ops. We can issue two half-sized
790 // operations and we only need to extract the upper YMM half.
791 // Two ops + 1 extract + 1 insert = 4.
792 { ISD::MUL, MVT::v16i16, 4 },
793 { ISD::MUL, MVT::v8i32, 4 },
794 { ISD::SUB, MVT::v32i8, 4 },
795 { ISD::ADD, MVT::v32i8, 4 },
796 { ISD::SUB, MVT::v16i16, 4 },
797 { ISD::ADD, MVT::v16i16, 4 },
798 { ISD::SUB, MVT::v8i32, 4 },
799 { ISD::ADD, MVT::v8i32, 4 },
800 { ISD::SUB, MVT::v4i64, 4 },
801 { ISD::ADD, MVT::v4i64, 4 },
802
803 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
804 // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
805 // Because we believe v4i64 to be a legal type, we must also include the
806 // extract+insert in the cost table. Therefore, the cost here is 18
807 // instead of 8.
808 { ISD::MUL, MVT::v4i64, 18 },
809
810 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence.
811
812 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
813 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
814 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
815 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
816 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
817 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/
818 };
819
820 if (ST->hasAVX())
821 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
822 return LT.first * Entry->Cost;
823
824 static const CostTblEntry SSE42CostTable[] = {
825 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
826 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
827 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
828 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
829
830 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
831 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/
832 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
833 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
834
835 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
836 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
837 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
838 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
839
840 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
841 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
842 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
843 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
844 };
845
846 if (ST->hasSSE42())
847 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
848 return LT.first * Entry->Cost;
849
850 static const CostTblEntry SSE41CostTable[] = {
851 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence.
852 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split.
853 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence.
854 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
855 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
856 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split
857
858 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence.
859 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split.
860 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence.
861 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
862 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend.
863 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split.
864
865 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence.
866 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split.
867 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence.
868 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
869 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
870 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split.
871
872 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org)
873 };
874
875 if (ST->hasSSE41())
876 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
877 return LT.first * Entry->Cost;
878
879 static const CostTblEntry SSE2CostTable[] = {
880 // We don't correctly identify costs of casts because they are marked as
881 // custom.
882 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
883 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
884 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
885 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
886 { ISD::SHL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
887
888 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
889 { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
890 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
891 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
892 { ISD::SRL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
893
894 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
895 { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
896 { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
897 { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence.
898 { ISD::SRA, MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.
899
900 { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence.
901 { ISD::MUL, MVT::v8i16, 1 }, // pmullw
902 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
903 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add
904
905 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
906 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
907 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
908 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
909
910 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
911 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
912
913 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
914 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
915 };
916
917 if (ST->hasSSE2())
918 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
919 return LT.first * Entry->Cost;
920
921 static const CostTblEntry SSE1CostTable[] = {
922 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
923 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
924
925 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
926 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
927
928 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
929 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
930
931 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
932 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
933 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
934
935 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
936 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
937 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
938 };
939
940 if (ST->hasSSE1())
941 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
942 return LT.first * Entry->Cost;
943
944 // It is not a good idea to vectorize division. We have to scalarize it and
945 // in the process we will often end up having to spill regular
946 // registers. The overhead of division is going to dominate most kernels
947 // anyway, so try hard to prevent vectorization of division - it is
948 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
949 // to hide "20 cycles" for each lane.
950 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
951 ISD == ISD::UDIV || ISD == ISD::UREM)) {
952 int ScalarCost = getArithmeticInstrCost(
953 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
954 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
955 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
956 }
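// Hypothetical illustration (not part of the original file) of the
// scalarization penalty above: a v4i32 udiv that legalizes in one step
// (LT.first == 1) with an assumed scalar cost of 1 is reported as 20 * 1 * 4 * 1.
//
//   constexpr int AssumedScalarCost = 1; // placeholder value, for illustration
//   constexpr int V4I32UDivCost =
//       20 * 1 /*LT.first*/ * 4 /*lanes*/ * AssumedScalarCost; // == 80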
957
958 // Fallback to the default implementation.
959 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
960}
961
962int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *BaseTp,
963 int Index, VectorType *SubTp) {
964 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
965 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
966 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
967
968 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
969 if (Kind == TTI::SK_Transpose)
970 Kind = TTI::SK_PermuteTwoSrc;
971
972 // For Broadcasts we are splatting the first element from the first input
973 // register, so we only need to reference that input, and all the output
974 // registers are the same.
975 if (Kind == TTI::SK_Broadcast)
976 LT.first = 1;
977
978 // Subvector extractions are free if they start at the beginning of a
979 // vector and cheap if the subvectors are aligned.
980 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
981 int NumElts = LT.second.getVectorNumElements();
982 if ((Index % NumElts) == 0)
983 return 0;
984 std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
985 if (SubLT.second.isVector()) {
986 int NumSubElts = SubLT.second.getVectorNumElements();
987 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
988 return SubLT.first;
989 // Handle some cases for widening legalization. For now we only handle
990 // cases where the original subvector was naturally aligned and evenly
991 // fit in its legalized subvector type.
992 // FIXME: Remove some of the alignment restrictions.
993 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
994 // vectors.
995 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
996 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
997 (NumSubElts % OrigSubElts) == 0 &&
998 LT.second.getVectorElementType() ==
999 SubLT.second.getVectorElementType() &&
1000 LT.second.getVectorElementType().getSizeInBits() ==
1001 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1002 assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1003 "Unexpected number of elements!");
1004 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1005 LT.second.getVectorNumElements());
1006 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1007 SubLT.second.getVectorNumElements());
1008 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1009 int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
1010 ExtractIndex, SubTy);
1011
1012 // If the original size is 32-bits or more, we can use pshufd. Otherwise
1013 // if we have SSSE3 we can use pshufb.
1014 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1015 return ExtractCost + 1; // pshufd or pshufb
1016
1017 assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1018 "Unexpected vector size");
1019
1020 return ExtractCost + 2; // worst case pshufhw + pshufd
1021 }
1022 }
1023 }
1024
1025 // Handle some common (illegal) sub-vector types as they are often very cheap
1026 // to shuffle even on targets without PSHUFB.
1027 EVT VT = TLI->getValueType(DL, BaseTp);
1028 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1029 !ST->hasSSSE3()) {
1030 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1031 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1032 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1033 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1034 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1035 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1036
1037 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1038 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1039 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1040 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1041
1042 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1043 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1044 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1045 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1046 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1047
1048 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1049 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1050 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1051 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1052 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1053 };
1054
1055 if (ST->hasSSE2())
1056 if (const auto *Entry =
1057 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1058 return Entry->Cost;
1059 }
1060
1061 // We are going to permute multiple sources and the result will be in multiple
1062 // destinations. We provide an accurate cost only for splits where the element
1063 // type remains the same.
1064 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1065 MVT LegalVT = LT.second;
1066 if (LegalVT.isVector() &&
1067 LegalVT.getVectorElementType().getSizeInBits() ==
1068 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1069 LegalVT.getVectorNumElements() <
1070 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1071
1072 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1073 unsigned LegalVTSize = LegalVT.getStoreSize();
1074 // Number of source vectors after legalization:
1075 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1076 // Number of destination vectors after legalization:
1077 unsigned NumOfDests = LT.first;
1078
1079 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1080 LegalVT.getVectorNumElements());
1081
1082 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1083 return NumOfShuffles *
1084 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
1085 }
1086
1087 return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
1088 }
1089
1090 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1091 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1092 // We assume that source and destination have the same vector type.
1093 int NumOfDests = LT.first;
1094 int NumOfShufflesPerDest = LT.first * 2 - 1;
1095 LT.first = NumOfDests * NumOfShufflesPerDest;
1096 }
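// Worked example (not part of the original file) of the split accounting
// above: if the type legalizes into two registers (LT.first == 2), each
// destination needs 2*2 - 1 == 3 two-input shuffles, so the per-entry cost
// from the tables below is scaled by 2 * 3 == 6.
//
//   constexpr int NumOfDests = 2;                   // LT.first
//   constexpr int NumOfShufflesPerDest = 2 * 2 - 1; // LT.first * 2 - 1
//   static_assert(NumOfDests * NumOfShufflesPerDest == 6, "scaled LT.first");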
1097
1098 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1099 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1100 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1101
1102 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1103 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1104
1105 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1106 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1107 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1108 };
1109
1110 if (ST->hasVBMI())
1111 if (const auto *Entry =
1112 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1113 return LT.first * Entry->Cost;
1114
1115 static const CostTblEntry AVX512BWShuffleTbl[] = {
1116 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1117 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1118
1119 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1120 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1121 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1122
1123 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1124 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1125 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1126
1127 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1128 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1129 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1130 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1131 };
1132
1133 if (ST->hasBWI())
1134 if (const auto *Entry =
1135 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1136 return LT.first * Entry->Cost;
1137
1138 static const CostTblEntry AVX512ShuffleTbl[] = {
1139 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1140 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1141 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1142 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1143 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1144 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1145
1146 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1147 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1148 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1149 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1150
1151 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1152 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1153 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1154 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1155 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1156 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1157 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1158 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1159 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1160 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1161 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1162 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1163 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1164
1165 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1166 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1167 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1168 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1169 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1170 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1171 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1172 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1173 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1174 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1175 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1176 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1177
1178 // FIXME: This just applies the type legalization cost rules above
1179 // assuming these completely split.
1180 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1181 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1182 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1183 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1184 };
1185
1186 if (ST->hasAVX512())
1187 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1188 return LT.first * Entry->Cost;
1189
1190 static const CostTblEntry AVX2ShuffleTbl[] = {
1191 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1192 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1193 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1194 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1195 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1196 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1197
1198 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1199 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1200 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1201 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1202 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1203 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1204
1205 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1206 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1207
1208 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1209 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1210 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1211 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1212 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1213 // + vpblendvb
1214 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1215 // + vpblendvb
1216
1217 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1218 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1219 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1220 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1221 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1222 // + vpblendvb
1223 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1224 // + vpblendvb
1225 };
1226
1227 if (ST->hasAVX2())
1228 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1229 return LT.first * Entry->Cost;
1230
1231 static const CostTblEntry XOPShuffleTbl[] = {
1232 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1233 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1234 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1235 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1236 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1237 // + vinsertf128
1238 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1239 // + vinsertf128
1240
1241 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1242 // + vinsertf128
1243 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1244 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1245 // + vinsertf128
1246 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1247 };
1248
1249 if (ST->hasXOP())
1250 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1251 return LT.first * Entry->Cost;
1252
1253 static const CostTblEntry AVX1ShuffleTbl[] = {
1254 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1255 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1256 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1257 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1258 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1259 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1260
1261 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1262 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1263 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1264 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1265 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1266 // + vinsertf128
1267 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1268 // + vinsertf128
1269
1270 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1271 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1272 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1273 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1274 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1275 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1276
1277 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1278 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1279 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1280 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1281 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1282 // + 2*por + vinsertf128
1283 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1284 // + 2*por + vinsertf128
1285
1286 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1287 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1288 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1289 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1290 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1291 // + 4*por + vinsertf128
1292 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1293 // + 4*por + vinsertf128
1294 };
1295
1296 if (ST->hasAVX())
1297 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1298 return LT.first * Entry->Cost;
1299
1300 static const CostTblEntry SSE41ShuffleTbl[] = {
1301 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1302 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1303 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1304 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1305 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1306 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1307 };
1308
1309 if (ST->hasSSE41())
1310 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1311 return LT.first * Entry->Cost;
1312
1313 static const CostTblEntry SSSE3ShuffleTbl[] = {
1314 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1315 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1316
1317 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1318 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1319
1320 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1321 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1322
1323 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1324 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1325
1326 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1327 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1328 };
1329
1330 if (ST->hasSSSE3())
1331 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1332 return LT.first * Entry->Cost;
1333
1334 static const CostTblEntry SSE2ShuffleTbl[] = {
1335 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1336 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1337 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1338 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1339 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1340
1341 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1342 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1343 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1344 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1345 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1346 // + 2*pshufd + 2*unpck + packus
1347
1348 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1349 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1350 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1351 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1352 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1353
1354 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1355 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1356 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1357 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1358 // + pshufd/unpck
1359 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1360 // + 2*pshufd + 2*unpck + 2*packus
1361
1362 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
1363 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
1364 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
1365 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
1366 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
1367 };
1368
1369 if (ST->hasSSE2())
1370 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1371 return LT.first * Entry->Cost;
1372
1373 static const CostTblEntry SSE1ShuffleTbl[] = {
1374 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
1375 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
1376 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
1377 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1378 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
1379 };
1380
1381 if (ST->hasSSE1())
1382 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1383 return LT.first * Entry->Cost;
1384
1385 return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
1386}
1387
1388int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1389 TTI::CastContextHint CCH,
1390 TTI::TargetCostKind CostKind,
1391 const Instruction *I) {
1392 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1393 assert(ISD && "Invalid opcode");
1394
1395 // TODO: Allow non-throughput costs that aren't binary.
1396 auto AdjustCost = [&CostKind](int Cost) {
1397 if (CostKind != TTI::TCK_RecipThroughput)
1398 return Cost == 0 ? 0 : 1;
1399 return Cost;
1400 };
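// [Editor's sketch, not part of the original source] AdjustCost collapses the
// table costs to a 0/1 answer for any cost kind other than reciprocal
// throughput, e.g. with CostKind == TTI::TCK_CodeSize, AdjustCost(0) == 0 and
// AdjustCost(5) == 1, while with TTI::TCK_RecipThroughput, AdjustCost(5) == 5.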
1401
1402 // FIXME: Need a better design of the cost table to handle non-simple types and
1403 // the potentially massive number of combinations (elem_num x src_type x dst_type).
1404
1405 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1406 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1407 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1408
1409 // Mask sign extend has an instruction.
1410 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1411 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1412 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1413 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1414 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1415 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1416 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1417 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1418 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1419 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1420 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
1421
1422 // Mask zero extend is a sext + shift.
1423 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1424 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1425 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1426 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1427 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1428 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1429 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1430 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1431 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1432 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1433 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
1434
1435 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
1436 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
1437 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
1438 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
1439 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
1440 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
1441 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
1442 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
1443 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
1444 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
1445 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
1446 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
1447 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
1448 };
1449
1450 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1451 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1452 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1453
1454 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1455 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1456
1457 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
1458 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
1459
1460 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
1461 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
1462 };
1463
1464 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1465 // 256-bit wide vectors.
1466
1467 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1468 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
1469 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
1470 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
1471
1472 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1473 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1474 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1475 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
1476 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1477 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1478 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1479 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1480 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
1481 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
1482 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
1483 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
1484 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
1485 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
1486 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
1487 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 },
1488 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 },
1489 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 },
1490 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 },
1491 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
1492 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
1493 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1494
1495 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
1496 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
1497
1498 // Sign extend is zmm vpternlogd+vptruncdb.
1499 // Zero extend is zmm broadcast load+vptruncdw.
1500 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
1501 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
1502 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
1503 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
1504 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
1505 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
1506 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
1507 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
1508
1509 // Sign extend is zmm vpternlogd+vptruncdw.
1510 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1511 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
1512 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1513 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
1514 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1515 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
1516 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1517 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
1518 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1519
1520 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
1521 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
1522 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
1523 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
1524 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
1525 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
1526 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
1527 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
1528 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
1529 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
1530
1531 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
1532 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
1533 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
1534 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
1535
1536 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1537 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1538 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1539 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1540 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1541 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1542 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1543 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1544 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1545 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1546
1547 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1548 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1549
1550 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1551 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1552 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1553 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1554 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1555 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1556 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1557 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1558
1559 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1560 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1561 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1562 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1563 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1564 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1565 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1566 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1567 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
1568 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
1569
1570 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f64, 3 },
1571 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
1572 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 3 },
1573 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 3 },
1574
1575 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1576 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
1577 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
1578 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
1579 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
1580 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
1581 };
1582
1583 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1584 // Mask sign extend has an instruction.
1585 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1586 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1587 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1588 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1589 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1590 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1591 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1592 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1593 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1594
1595 // Mask zero extend is a sext + shift.
1596 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1597 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1598 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1599 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1600 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1601 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1602 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1603 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1604 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1605
1606 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
1607 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
1608 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
1609 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
1610 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
1611 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
1612 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
1613 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
1614 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
1615 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
1616 };
1617
1618 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1619 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1620 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1621 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1622 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1623
1624 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1625 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1626 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1627 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1628
1629 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
1630 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
1631 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1632 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
1633
1634 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
1635 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
1636 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1637 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
1638 };
1639
1640 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1641 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1642 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1643 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1644 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
1645 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1646 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1647 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1648 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
1649 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
1650 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
1651 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
1652 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
1653 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
1654 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
1655
1656 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1657 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1658 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
1659 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
1660 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
1661 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
1662 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
1663 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
1664 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
1665 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
1666
1667 // sign extend is vpcmpeq+maskedmove+vpmovdw
1668 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1669 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1670 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
1671 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1672 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
1673 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1674 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
1675 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1676 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1677
1678 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
1679 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
1680 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
1681 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
1682 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
1683 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
1684 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
1685 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
1686 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
1687 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
1688
1689 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
1690 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1691 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
1692 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
1693 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1694 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1695 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
1696 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
1697 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1698 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1699 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1700 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
1701 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1702 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
1703
1704 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
1705 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
1706
1707 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 3 },
1708 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 3 },
1709
1710 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
1711 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
1712
1713 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1714 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1715 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 1 },
1716 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
1717 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
1718 };
1719
1720 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1721 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1722 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1723 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1724 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1725 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
1726 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
1727 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
1728 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
1729 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1730 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1731 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1732 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1733 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
1734 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
1735 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1736 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1737 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1738 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1739 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1740 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1741
1742 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1743 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
1744
1745 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
1746 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
1747 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
1748 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
1749
1750 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
1751 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
1752
1753 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
1754 };
1755
1756 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1757 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1758 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1759 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1760 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1761 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1762 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1763 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1764 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1765 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1766 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1767 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1768 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1769 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 4 },
1770 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1771 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1772 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1773 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1774 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1775
1776 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
1777 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
1778 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
1779 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
1780 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
1781
1782 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
1783 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1784 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1785 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
1786 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
1787 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1788 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 11 },
1789 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 9 },
1790 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
1791 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 11 },
1792
1793 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
1794 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
1795 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
1796 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
1797 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
1798 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
1799 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
1800 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
1801 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1802 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1803 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1804 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1805
1806 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
1807 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
1808 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
1809 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
1810 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1811 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
1812 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1813 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1814 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1815 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
1816 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
1817 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
1818 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
1819 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1820 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 },
1821 // The generic code to compute the scalar overhead is currently broken.
1822 // Workaround this limitation by estimating the scalarization overhead
1823 // here. We have roughly 10 instructions per scalar element.
1824 // Multiply that by the vector width.
1825 // FIXME: remove that when PR19268 is fixed.
1826 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1827 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1828
1829 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 4 },
1830 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f64, 3 },
1831 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f64, 2 },
1832 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 3 },
1833
1834 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f64, 3 },
1835 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f64, 2 },
1836 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 4 },
1837 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 3 },
1838 // This node is expanded into scalarized operations but BasicTTI is overly
1839 // optimistic estimating its cost. It computes 3 per element (one
1840 // vector-extract, one scalar conversion and one vector-insert). The
1841 // problem is that the inserts form a read-modify-write chain so latency
1842 // should be factored in too. Inflating the cost per element by 1.
1843 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
1844 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
1845
1846 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
1847 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
1848 };
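// [Editor's sketch, not part of the original source] The inflated AVX entries
// above encode simple per-element arithmetic rather than measured throughput:
// the FP_TO_UINT rows bump BasicTTI's 3-per-element estimate to 4, giving
// 8*4 == 32 for v8f32->v8i32 and 4*4 == 16 for v4f64->v4i32, and the
// SINT_TO_FP v4i64->v4f64 cost of 13 is a rough scalarization estimate kept
// until PR19268 is fixed.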
1849
1850 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1851 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1852 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1853 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1854 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1855 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1856 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1857
1858 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1859 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
1860 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1861 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1862 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1863 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1864 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1865 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1866 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1867 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1868 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1869 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1870 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1871 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1872 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1873 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1874 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1875 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1876
1877 // These truncates end up widening elements.
1878 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ
1879 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
1880 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD
1881
1882 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 1 },
1883 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 1 },
1884 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
1885 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
1886 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
1887 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
1888 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
1889 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
1890 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1 }, // PSHUFB
1891
1892 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
1893 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
1894
1895 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 3 },
1896 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 3 },
1897
1898 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 3 },
1899 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 3 },
1900 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
1901 };
1902
1903 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
1904 // These are somewhat magic numbers justified by looking at the output of
1905 // Intel's IACA, running some kernels and making sure when we take
1906 // legalization into account the throughput will be overestimated.
1907 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1908 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1909 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1910 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1911 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
1912 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
1913 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
1914 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1915 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
1916
1917 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1918 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1919 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1920 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1921 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1922 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
1923 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
1924 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1925
1926 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 4 },
1927 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 },
1928 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
1929 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
1930 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
1931 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 4 },
1932
1933 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 },
1934
1935 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 },
1936 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },
1937
1938 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
1939 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
1940 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 4 },
1941 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 4 },
1942 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
1943 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 },
1944 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
1945 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 },
1946
1947 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1948 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
1949 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
1950 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
1951 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1952 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
1953 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1954 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
1955 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1956 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1957 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1958 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1959 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
1960 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
1961 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1962 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
1963 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1964 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
1965 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1966 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1967 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
1968 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
1969 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1970 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
1971
1972 // These truncates are really widening elements.
1973 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
1974 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
1975 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
1976 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
1977 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
1978 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
1979
1980 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // PAND+PACKUSWB
1981 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // PAND+PACKUSWB
1982 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
1983 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
1984 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 3 }, // PAND+2*PACKUSWB
1985 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
1986 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
1987 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
1988 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1989 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
1990 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1991 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
1992 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
1993 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
1994 { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1 }, // PSHUFD
1995 };
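// [Editor's sketch, not part of the original source] The N*10 entries above
// follow the "roughly 10 instructions per scalarized element" rule of thumb,
// e.g. SINT_TO_FP v16i8->v2f64 is priced at 16*10 == 160 and v4i32->v2f64 at
// 2*10 == 20, while cases with a vector lowering (such as v4i32->v4f32 at 5)
// keep small IACA-derived costs.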
1996
1997 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
1998 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
1999
2000 if (ST->hasSSE2() && !ST->hasAVX()) {
2001 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2002 LTDest.second, LTSrc.second))
2003 return AdjustCost(LTSrc.first * Entry->Cost);
2004 }
2005
2006 EVT SrcTy = TLI->getValueType(DL, Src);
2007 EVT DstTy = TLI->getValueType(DL, Dst);
2008
2009 // The function getSimpleVT only handles simple value types.
2010 if (!SrcTy.isSimple() || !DstTy.isSimple())
2011 return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind));
2012
2013 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2014 MVT SimpleDstTy = DstTy.getSimpleVT();
2015
2016 if (ST->useAVX512Regs()) {
2017 if (ST->hasBWI())
2018 if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
2019 SimpleDstTy, SimpleSrcTy))
2020 return AdjustCost(Entry->Cost);
2021
2022 if (ST->hasDQI())
2023 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
2024 SimpleDstTy, SimpleSrcTy))
2025 return AdjustCost(Entry->Cost);
2026
2027 if (ST->hasAVX512())
2028 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
2029 SimpleDstTy, SimpleSrcTy))
2030 return AdjustCost(Entry->Cost);
2031 }
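// [Editor's sketch, not part of the original source] Worked example of the
// dispatch above, assuming a 512-bit AVX-512 target and a sign extension from
// <16 x i8> to <16 x i32>: both types are simple, the BW and DQ tables have no
// matching entry, and AVX512FConversionTbl yields
// { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, so AdjustCost(1) == 1 is
// returned.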
2032
2033 if (ST->hasBWI())
2034 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2035 SimpleDstTy, SimpleSrcTy))
2036 return AdjustCost(Entry->Cost);
2037
2038 if (ST->hasDQI())
2039 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2040 SimpleDstTy, SimpleSrcTy))
2041 return AdjustCost(Entry->Cost);
2042
2043 if (ST->hasAVX512())
2044 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2045 SimpleDstTy, SimpleSrcTy))
2046 return AdjustCost(Entry->Cost);
2047
2048 if (ST->hasAVX2()) {
2049 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2050 SimpleDstTy, SimpleSrcTy))
2051 return AdjustCost(Entry->Cost);
2052 }
2053
2054 if (ST->hasAVX()) {
2055 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2056 SimpleDstTy, SimpleSrcTy))
2057 return AdjustCost(Entry->Cost);
2058 }
2059
2060 if (ST->hasSSE41()) {
2061 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2062 SimpleDstTy, SimpleSrcTy))
2063 return AdjustCost(Entry->Cost);
2064 }
2065
2066 if (ST->hasSSE2()) {
2067 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2068 SimpleDstTy, SimpleSrcTy))
2069 return AdjustCost(Entry->Cost);
2070 }
2071
2072 return AdjustCost(
2073 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2074}
2075
2076int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
2077 TTI::TargetCostKind CostKind,
2078 const Instruction *I) {
2079 // TODO: Handle other cost kinds.
2080 if (CostKind != TTI::TCK_RecipThroughput)
2081 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
2082
2083 // Legalize the type.
2084 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2085
2086 MVT MTy = LT.second;
2087
2088 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2089 assert(ISD && "Invalid opcode");
2090
2091 unsigned ExtraCost = 0;
2092 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2093 // Some vector comparison predicates cost extra instructions.
2094 if (MTy.isVector() &&
2095 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2096 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2097 ST->hasBWI())) {
2098 switch (cast<CmpInst>(I)->getPredicate()) {
2099 case CmpInst::Predicate::ICMP_NE:
2100 // xor(cmpeq(x,y),-1)
2101 ExtraCost = 1;
2102 break;
2103 case CmpInst::Predicate::ICMP_SGE:
2104 case CmpInst::Predicate::ICMP_SLE:
2105 // xor(cmpgt(x,y),-1)
2106 ExtraCost = 1;
2107 break;
2108 case CmpInst::Predicate::ICMP_ULT:
2109 case CmpInst::Predicate::ICMP_UGT:
2110 // cmpgt(xor(x,signbit),xor(y,signbit))
2111 // xor(cmpeq(pmaxu(x,y),x),-1)
2112 ExtraCost = 2;
2113 break;
2114 case CmpInst::Predicate::ICMP_ULE:
2115 case CmpInst::Predicate::ICMP_UGE:
2116 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2117 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2118 // cmpeq(psubus(x,y),0)
2119 // cmpeq(pminu(x,y),x)
2120 ExtraCost = 1;
2121 } else {
2122 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2123 ExtraCost = 3;
2124 }
2125 break;
2126 default:
2127 break;
2128 }
2129 }
2130 }
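// [Editor's sketch, not part of the original source] ExtraCost is folded into
// every lookup below as LT.first * (ExtraCost + Entry->Cost). For example, an
// `icmp ult <4 x i32>` on a plain SSE2 target takes the ICMP_ULT branch above
// (ExtraCost == 2), matches { ISD::SETCC, MVT::v4i32, 1 } in SSE2CostTbl, and
// LT.first == 1, for a total cost of 1 * (2 + 1) == 3.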
2131
2132 static const CostTblEntry SLMCostTbl[] = {
2133 // slm pcmpeq/pcmpgt throughput is 2
2134 { ISD::SETCC, MVT::v2i64, 2 },
2135 };
2136
2137 static const CostTblEntry AVX512BWCostTbl[] = {
2138 { ISD::SETCC, MVT::v32i16, 1 },
2139 { ISD::SETCC, MVT::v64i8, 1 },
2140
2141 { ISD::SELECT, MVT::v32i16, 1 },
2142 { ISD::SELECT, MVT::v64i8, 1 },
2143 };
2144
2145 static const CostTblEntry AVX512CostTbl[] = {
2146 { ISD::SETCC, MVT::v8i64, 1 },
2147 { ISD::SETCC, MVT::v16i32, 1 },
2148 { ISD::SETCC, MVT::v8f64, 1 },
2149 { ISD::SETCC, MVT::v16f32, 1 },
2150
2151 { ISD::SELECT, MVT::v8i64, 1 },
2152 { ISD::SELECT, MVT::v16i32, 1 },
2153 { ISD::SELECT, MVT::v8f64, 1 },
2154 { ISD::SELECT, MVT::v16f32, 1 },
2155
2156 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
2157 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4
2158
2159 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
2160 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
2161 };
2162
2163 static const CostTblEntry AVX2CostTbl[] = {
2164 { ISD::SETCC, MVT::v4i64, 1 },
2165 { ISD::SETCC, MVT::v8i32, 1 },
2166 { ISD::SETCC, MVT::v16i16, 1 },
2167 { ISD::SETCC, MVT::v32i8, 1 },
2168
2169 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
2170 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
2171 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
2172 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
2173 };
2174
2175 static const CostTblEntry AVX1CostTbl[] = {
2176 { ISD::SETCC, MVT::v4f64, 1 },
2177 { ISD::SETCC, MVT::v8f32, 1 },
2178 // AVX1 does not support 8-wide integer compare.
2179 { ISD::SETCC, MVT::v4i64, 4 },
2180 { ISD::SETCC, MVT::v8i32, 4 },
2181 { ISD::SETCC, MVT::v16i16, 4 },
2182 { ISD::SETCC, MVT::v32i8, 4 },
2183
2184 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
2185 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
2186 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
2187 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
2188 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
2189 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
2190 };
2191
2192 static const CostTblEntry SSE42CostTbl[] = {
2193 { ISD::SETCC, MVT::v2f64, 1 },
2194 { ISD::SETCC, MVT::v4f32, 1 },
2195 { ISD::SETCC, MVT::v2i64, 1 },
2196 };
2197
2198 static const CostTblEntry SSE41CostTbl[] = {
2199 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
2200 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
2201 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
2202 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
2203 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
2204 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
2205 };
2206
2207 static const CostTblEntry SSE2CostTbl[] = {
2208 { ISD::SETCC, MVT::v2f64, 2 },
2209 { ISD::SETCC, MVT::f64, 1 },
2210 { ISD::SETCC, MVT::v2i64, 8 },
2211 { ISD::SETCC, MVT::v4i32, 1 },
2212 { ISD::SETCC, MVT::v8i16, 1 },
2213 { ISD::SETCC, MVT::v16i8, 1 },
2214
2215 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
2216 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
2217 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
2218 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
2219 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
2220 };
2221
2222 static const CostTblEntry SSE1CostTbl[] = {
2223 { ISD::SETCC, MVT::v4f32, 2 },
2224 { ISD::SETCC, MVT::f32, 1 },
2225
2226 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
2227 };
2228
2229 if (ST->isSLM())
2230 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2231 return LT.first * (ExtraCost + Entry->Cost);
2232
2233 if (ST->hasBWI())
2234 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2235 return LT.first * (ExtraCost + Entry->Cost);
2236
2237 if (ST->hasAVX512())
2238 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2239 return LT.first * (ExtraCost + Entry->Cost);
2240
2241 if (ST->hasAVX2())
2242 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2243 return LT.first * (ExtraCost + Entry->Cost);
2244
2245 if (ST->hasAVX())
2246 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2247 return LT.first * (ExtraCost + Entry->Cost);
2248
2249 if (ST->hasSSE42())
2250 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2251 return LT.first * (ExtraCost + Entry->Cost);
2252
2253 if (ST->hasSSE41())
2254 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2255 return LT.first * (ExtraCost + Entry->Cost);
2256
2257 if (ST->hasSSE2())
2258 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2259 return LT.first * (ExtraCost + Entry->Cost);
2260
2261 if (ST->hasSSE1())
2262 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2263 return LT.first * (ExtraCost + Entry->Cost);
2264
2265 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
2266}
2267
2268unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2269
2270int X86TTIImpl::getTypeBasedIntrinsicInstrCost(
2271 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) {
2272
2273 // Costs should match the codegen from:
2274 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2275 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2276 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2277 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2278 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2279 static const CostTblEntry AVX512CDCostTbl[] = {
2280 { ISD::CTLZ, MVT::v8i64, 1 },
2281 { ISD::CTLZ, MVT::v16i32, 1 },
2282 { ISD::CTLZ, MVT::v32i16, 8 },
2283 { ISD::CTLZ, MVT::v64i8, 20 },
2284 { ISD::CTLZ, MVT::v4i64, 1 },
2285 { ISD::CTLZ, MVT::v8i32, 1 },
2286 { ISD::CTLZ, MVT::v16i16, 4 },
2287 { ISD::CTLZ, MVT::v32i8, 10 },
2288 { ISD::CTLZ, MVT::v2i64, 1 },
2289 { ISD::CTLZ, MVT::v4i32, 1 },
2290 { ISD::CTLZ, MVT::v8i16, 4 },
2291 { ISD::CTLZ, MVT::v16i8, 4 },
2292 };
2293 static const CostTblEntry AVX512BWCostTbl[] = {
2294 { ISD::ABS, MVT::v32i16, 1 },
2295 { ISD::ABS, MVT::v64i8, 1 },
2296 { ISD::BITREVERSE, MVT::v8i64, 5 },
2297 { ISD::BITREVERSE, MVT::v16i32, 5 },
2298 { ISD::BITREVERSE, MVT::v32i16, 5 },
2299 { ISD::BITREVERSE, MVT::v64i8, 5 },
2300 { ISD::CTLZ, MVT::v8i64, 23 },
2301 { ISD::CTLZ, MVT::v16i32, 22 },
2302 { ISD::CTLZ, MVT::v32i16, 18 },
2303 { ISD::CTLZ, MVT::v64i8, 17 },
2304 { ISD::CTPOP, MVT::v8i64, 7 },
2305 { ISD::CTPOP, MVT::v16i32, 11 },
2306 { ISD::CTPOP, MVT::v32i16, 9 },
2307 { ISD::CTPOP, MVT::v64i8, 6 },
2308 { ISD::CTTZ, MVT::v8i64, 10 },
2309 { ISD::CTTZ, MVT::v16i32, 14 },
2310 { ISD::CTTZ, MVT::v32i16, 12 },
2311 { ISD::CTTZ, MVT::v64i8, 9 },
2312 { ISD::SADDSAT, MVT::v32i16, 1 },
2313 { ISD::SADDSAT, MVT::v64i8, 1 },
2314 { ISD::SMAX, MVT::v32i16, 1 },
2315 { ISD::SMAX, MVT::v64i8, 1 },
2316 { ISD::SMIN, MVT::v32i16, 1 },
2317 { ISD::SMIN, MVT::v64i8, 1 },
2318 { ISD::SSUBSAT, MVT::v32i16, 1 },
2319 { ISD::SSUBSAT, MVT::v64i8, 1 },
2320 { ISD::UADDSAT, MVT::v32i16, 1 },
2321 { ISD::UADDSAT, MVT::v64i8, 1 },
2322 { ISD::UMAX, MVT::v32i16, 1 },
2323 { ISD::UMAX, MVT::v64i8, 1 },
2324 { ISD::UMIN, MVT::v32i16, 1 },
2325 { ISD::UMIN, MVT::v64i8, 1 },
2326 { ISD::USUBSAT, MVT::v32i16, 1 },
2327 { ISD::USUBSAT, MVT::v64i8, 1 },
2328 };
2329 static const CostTblEntry AVX512CostTbl[] = {
2330 { ISD::ABS, MVT::v8i64, 1 },
2331 { ISD::ABS, MVT::v16i32, 1 },
2332 { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split
2333 { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split
2334 { ISD::ABS, MVT::v4i64, 1 },
2335 { ISD::ABS, MVT::v2i64, 1 },
2336 { ISD::BITREVERSE, MVT::v8i64, 36 },
2337 { ISD::BITREVERSE, MVT::v16i32, 24 },
2338 { ISD::BITREVERSE, MVT::v32i16, 10 },
2339 { ISD::BITREVERSE, MVT::v64i8, 10 },
2340 { ISD::CTLZ, MVT::v8i64, 29 },
2341 { ISD::CTLZ, MVT::v16i32, 35 },
2342 { ISD::CTLZ, MVT::v32i16, 28 },
2343 { ISD::CTLZ, MVT::v64i8, 18 },
2344 { ISD::CTPOP, MVT::v8i64, 16 },
2345 { ISD::CTPOP, MVT::v16i32, 24 },
2346 { ISD::CTPOP, MVT::v32i16, 18 },
2347 { ISD::CTPOP, MVT::v64i8, 12 },
2348 { ISD::CTTZ, MVT::v8i64, 20 },
2349 { ISD::CTTZ, MVT::v16i32, 28 },
2350 { ISD::CTTZ, MVT::v32i16, 24 },
2351 { ISD::CTTZ, MVT::v64i8, 18 },
2352 { ISD::SMAX, MVT::v8i64, 1 },
2353 { ISD::SMAX, MVT::v16i32, 1 },
2354 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split
2355 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split
2356 { ISD::SMAX, MVT::v4i64, 1 },
2357 { ISD::SMAX, MVT::v2i64, 1 },
2358 { ISD::SMIN, MVT::v8i64, 1 },
2359 { ISD::SMIN, MVT::v16i32, 1 },
2360 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split
2361 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split
2362 { ISD::SMIN, MVT::v4i64, 1 },
2363 { ISD::SMIN, MVT::v2i64, 1 },
2364 { ISD::UMAX, MVT::v8i64, 1 },
2365 { ISD::UMAX, MVT::v16i32, 1 },
2366 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split
2367 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split
2368 { ISD::UMAX, MVT::v4i64, 1 },
2369 { ISD::UMAX, MVT::v2i64, 1 },
2370 { ISD::UMIN, MVT::v8i64, 1 },
2371 { ISD::UMIN, MVT::v16i32, 1 },
2372 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split
2373 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split
2374 { ISD::UMIN, MVT::v4i64, 1 },
2375 { ISD::UMIN, MVT::v2i64, 1 },
2376 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
2377 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
2378 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
2379 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
2380 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
2381 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
2382 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
2383 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
2384 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2385 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2386 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2387 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2388 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2389 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2390 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2391 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2392 { ISD::FMAXNUM, MVT::f32, 2 },
2393 { ISD::FMAXNUM, MVT::v4f32, 2 },
2394 { ISD::FMAXNUM, MVT::v8f32, 2 },
2395 { ISD::FMAXNUM, MVT::v16f32, 2 },
2396 { ISD::FMAXNUM, MVT::f64, 2 },
2397 { ISD::FMAXNUM, MVT::v2f64, 2 },
2398 { ISD::FMAXNUM, MVT::v4f64, 2 },
2399 { ISD::FMAXNUM, MVT::v8f64, 2 },
2400 };
2401 static const CostTblEntry XOPCostTbl[] = {
2402 { ISD::BITREVERSE, MVT::v4i64, 4 },
2403 { ISD::BITREVERSE, MVT::v8i32, 4 },
2404 { ISD::BITREVERSE, MVT::v16i16, 4 },
2405 { ISD::BITREVERSE, MVT::v32i8, 4 },
2406 { ISD::BITREVERSE, MVT::v2i64, 1 },
2407 { ISD::BITREVERSE, MVT::v4i32, 1 },
2408 { ISD::BITREVERSE, MVT::v8i16, 1 },
2409 { ISD::BITREVERSE, MVT::v16i8, 1 },
2410 { ISD::BITREVERSE, MVT::i64, 3 },
2411 { ISD::BITREVERSE, MVT::i32, 3 },
2412 { ISD::BITREVERSE, MVT::i16, 3 },
2413 { ISD::BITREVERSE, MVT::i8, 3 }
2414 };
2415 static const CostTblEntry AVX2CostTbl[] = {
2416 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2417 { ISD::ABS, MVT::v8i32, 1 },
2418 { ISD::ABS, MVT::v16i16, 1 },
2419 { ISD::ABS, MVT::v32i8, 1 },
2420 { ISD::BITREVERSE, MVT::v4i64, 5 },
2421 { ISD::BITREVERSE, MVT::v8i32, 5 },
2422 { ISD::BITREVERSE, MVT::v16i16, 5 },
2423 { ISD::BITREVERSE, MVT::v32i8, 5 },
2424 { ISD::BSWAP, MVT::v4i64, 1 },
2425 { ISD::BSWAP, MVT::v8i32, 1 },
2426 { ISD::BSWAP, MVT::v16i16, 1 },
2427 { ISD::CTLZ, MVT::v4i64, 23 },
2428 { ISD::CTLZ, MVT::v8i32, 18 },
2429 { ISD::CTLZ, MVT::v16i16, 14 },
2430 { ISD::CTLZ, MVT::v32i8, 9 },
2431 { ISD::CTPOP, MVT::v4i64, 7 },
2432 { ISD::CTPOP, MVT::v8i32, 11 },
2433 { ISD::CTPOP, MVT::v16i16, 9 },
2434 { ISD::CTPOP, MVT::v32i8, 6 },
2435 { ISD::CTTZ, MVT::v4i64, 10 },
2436 { ISD::CTTZ, MVT::v8i32, 14 },
2437 { ISD::CTTZ, MVT::v16i16, 12 },
2438 { ISD::CTTZ, MVT::v32i8, 9 },
2439 { ISD::SADDSAT, MVT::v16i16, 1 },
2440 { ISD::SADDSAT, MVT::v32i8, 1 },
2441 { ISD::SMAX, MVT::v8i32, 1 },
2442 { ISD::SMAX, MVT::v16i16, 1 },
2443 { ISD::SMAX, MVT::v32i8, 1 },
2444 { ISD::SMIN, MVT::v8i32, 1 },
2445 { ISD::SMIN, MVT::v16i16, 1 },
2446 { ISD::SMIN, MVT::v32i8, 1 },
2447 { ISD::SSUBSAT, MVT::v16i16, 1 },
2448 { ISD::SSUBSAT, MVT::v32i8, 1 },
2449 { ISD::UADDSAT, MVT::v16i16, 1 },
2450 { ISD::UADDSAT, MVT::v32i8, 1 },
2451 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
2452 { ISD::UMAX, MVT::v8i32, 1 },
2453 { ISD::UMAX, MVT::v16i16, 1 },
2454 { ISD::UMAX, MVT::v32i8, 1 },
2455 { ISD::UMIN, MVT::v8i32, 1 },
2456 { ISD::UMIN, MVT::v16i16, 1 },
2457 { ISD::UMIN, MVT::v32i8, 1 },
2458 { ISD::USUBSAT, MVT::v16i16, 1 },
2459 { ISD::USUBSAT, MVT::v32i8, 1 },
2460 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
2461 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
2462 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
2463 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
2464 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
2465 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
2466 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
2467 };
2468 static const CostTblEntry AVX1CostTbl[] = {
2469 { ISD::ABS, MVT::v4i64, 6 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2470 { ISD::ABS, MVT::v8i32, 3 },
2471 { ISD::ABS, MVT::v16i16, 3 },
2472 { ISD::ABS, MVT::v32i8, 3 },
2473 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
2474 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
2475 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2476 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
2477 { ISD::BSWAP, MVT::v4i64, 4 },
2478 { ISD::BSWAP, MVT::v8i32, 4 },
2479 { ISD::BSWAP, MVT::v16i16, 4 },
2480 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
2481 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
2482 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2483 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2484 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
2485 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
2486 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2487 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
2488 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
2489 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
2490 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2491 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2492 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2493 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2494 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2495 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2496 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2497 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2498 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2499 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2500 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2501 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2502 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2503 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2504 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
2505 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2506 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2507 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2508 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2509 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2510 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2511 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2512 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2513 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
2514 { ISD::FMAXNUM, MVT::f32, 3 },
2515 { ISD::FMAXNUM, MVT::v4f32, 3 },
2516 { ISD::FMAXNUM, MVT::v8f32, 5 },
2517 { ISD::FMAXNUM, MVT::f64, 3 },
2518 { ISD::FMAXNUM, MVT::v2f64, 3 },
2519 { ISD::FMAXNUM, MVT::v4f64, 5 },
2520 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
2521 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
2522 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
2523 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
2524 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
2525 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
2526 };
2527 static const CostTblEntry GLMCostTbl[] = {
2528 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
2529 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2530 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
2531 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2532 };
2533 static const CostTblEntry SLMCostTbl[] = {
2534 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
2535 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2536 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
2537 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2538 };
2539 static const CostTblEntry SSE42CostTbl[] = {
2540 { ISD::ABS, MVT::v2i64, 3 }, // BLENDVPD(X,PSUBQ(0,X),X)
2541 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
2542 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
2543 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
2544 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
2545 };
2546 static const CostTblEntry SSE41CostTbl[] = {
2547 { ISD::SMAX, MVT::v4i32, 1 },
2548 { ISD::SMAX, MVT::v16i8, 1 },
2549 { ISD::SMIN, MVT::v4i32, 1 },
2550 { ISD::SMIN, MVT::v16i8, 1 },
2551 { ISD::UMAX, MVT::v4i32, 1 },
2552 { ISD::UMAX, MVT::v8i16, 1 },
2553 { ISD::UMIN, MVT::v4i32, 1 },
2554 { ISD::UMIN, MVT::v8i16, 1 },
2555 };
2556 static const CostTblEntry SSSE3CostTbl[] = {
2557 { ISD::ABS, MVT::v4i32, 1 },
2558 { ISD::ABS, MVT::v8i16, 1 },
2559 { ISD::ABS, MVT::v16i8, 1 },
2560 { ISD::BITREVERSE, MVT::v2i64, 5 },
2561 { ISD::BITREVERSE, MVT::v4i32, 5 },
2562 { ISD::BITREVERSE, MVT::v8i16, 5 },
2563 { ISD::BITREVERSE, MVT::v16i8, 5 },
2564 { ISD::BSWAP, MVT::v2i64, 1 },
2565 { ISD::BSWAP, MVT::v4i32, 1 },
2566 { ISD::BSWAP, MVT::v8i16, 1 },
2567 { ISD::CTLZ, MVT::v2i64, 23 },
2568 { ISD::CTLZ, MVT::v4i32, 18 },
2569 { ISD::CTLZ, MVT::v8i16, 14 },
2570 { ISD::CTLZ, MVT::v16i8, 9 },
2571 { ISD::CTPOP, MVT::v2i64, 7 },
2572 { ISD::CTPOP, MVT::v4i32, 11 },
2573 { ISD::CTPOP, MVT::v8i16, 9 },
2574 { ISD::CTPOP, MVT::v16i8, 6 },
2575 { ISD::CTTZ, MVT::v2i64, 10 },
2576 { ISD::CTTZ, MVT::v4i32, 14 },
2577 { ISD::CTTZ, MVT::v8i16, 12 },
2578 { ISD::CTTZ, MVT::v16i8, 9 }
2579 };
2580 static const CostTblEntry SSE2CostTbl[] = {
2581 { ISD::ABS, MVT::v2i64, 4 },
2582 { ISD::ABS, MVT::v4i32, 3 },
2583 { ISD::ABS, MVT::v8i16, 3 },
2584 { ISD::ABS, MVT::v16i8, 3 },
2585 { ISD::BITREVERSE, MVT::v2i64, 29 },
2586 { ISD::BITREVERSE, MVT::v4i32, 27 },
2587 { ISD::BITREVERSE, MVT::v8i16, 27 },
2588 { ISD::BITREVERSE, MVT::v16i8, 20 },
2589 { ISD::BSWAP, MVT::v2i64, 7 },
2590 { ISD::BSWAP, MVT::v4i32, 7 },
2591 { ISD::BSWAP, MVT::v8i16, 7 },
2592 { ISD::CTLZ, MVT::v2i64, 25 },
2593 { ISD::CTLZ, MVT::v4i32, 26 },
2594 { ISD::CTLZ, MVT::v8i16, 20 },
2595 { ISD::CTLZ, MVT::v16i8, 17 },
2596 { ISD::CTPOP, MVT::v2i64, 12 },
2597 { ISD::CTPOP, MVT::v4i32, 15 },
2598 { ISD::CTPOP, MVT::v8i16, 13 },
2599 { ISD::CTPOP, MVT::v16i8, 10 },
2600 { ISD::CTTZ, MVT::v2i64, 14 },
2601 { ISD::CTTZ, MVT::v4i32, 18 },
2602 { ISD::CTTZ, MVT::v8i16, 16 },
2603 { ISD::CTTZ, MVT::v16i8, 13 },
2604 { ISD::SADDSAT, MVT::v8i16, 1 },
2605 { ISD::SADDSAT, MVT::v16i8, 1 },
2606 { ISD::SMAX, MVT::v8i16, 1 },
2607 { ISD::SMIN, MVT::v8i16, 1 },
2608 { ISD::SSUBSAT, MVT::v8i16, 1 },
2609 { ISD::SSUBSAT, MVT::v16i8, 1 },
2610 { ISD::UADDSAT, MVT::v8i16, 1 },
2611 { ISD::UADDSAT, MVT::v16i8, 1 },
2612 { ISD::UMAX, MVT::v16i8, 1 },
2613 { ISD::UMIN, MVT::v16i8, 1 },
2614 { ISD::USUBSAT, MVT::v8i16, 1 },
2615 { ISD::USUBSAT, MVT::v16i8, 1 },
2616 { ISD::FMAXNUM, MVT::f64, 4 },
2617 { ISD::FMAXNUM, MVT::v2f64, 4 },
2618 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
2619 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
2620 };
2621 static const CostTblEntry SSE1CostTbl[] = {
2622 { ISD::FMAXNUM, MVT::f32, 4 },
2623 { ISD::FMAXNUM, MVT::v4f32, 4 },
2624 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
2625 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
2626 };
2627 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
2628 { ISD::CTTZ, MVT::i64, 1 },
2629 };
2630 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
2631 { ISD::CTTZ, MVT::i32, 1 },
2632 { ISD::CTTZ, MVT::i16, 1 },
2633 { ISD::CTTZ, MVT::i8, 1 },
2634 };
2635 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
2636 { ISD::CTLZ, MVT::i64, 1 },
2637 };
2638 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
2639 { ISD::CTLZ, MVT::i32, 1 },
2640 { ISD::CTLZ, MVT::i16, 1 },
2641 { ISD::CTLZ, MVT::i8, 1 },
2642 };
2643 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
2644 { ISD::CTPOP, MVT::i64, 1 },
2645 };
2646 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
2647 { ISD::CTPOP, MVT::i32, 1 },
2648 { ISD::CTPOP, MVT::i16, 1 },
2649 { ISD::CTPOP, MVT::i8, 1 },
2650 };
2651 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2652 { ISD::BITREVERSE, MVT::i64, 14 },
2653 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
2654 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
2655 { ISD::CTPOP, MVT::i64, 10 },
2656 { ISD::SADDO, MVT::i64, 1 },
2657 { ISD::UADDO, MVT::i64, 1 },
2658 };
2659 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2660 { ISD::BITREVERSE, MVT::i32, 14 },
2661 { ISD::BITREVERSE, MVT::i16, 14 },
2662 { ISD::BITREVERSE, MVT::i8, 11 },
2663 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
2664 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
2665 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV
2666 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
2667 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
2668 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH
2669 { ISD::CTPOP, MVT::i32, 8 },
2670 { ISD::CTPOP, MVT::i16, 9 },
2671 { ISD::CTPOP, MVT::i8, 7 },
2672 { ISD::SADDO, MVT::i32, 1 },
2673 { ISD::SADDO, MVT::i16, 1 },
2674 { ISD::SADDO, MVT::i8, 1 },
2675 { ISD::UADDO, MVT::i32, 1 },
2676 { ISD::UADDO, MVT::i16, 1 },
2677 { ISD::UADDO, MVT::i8, 1 },
2678 };
2679
2680 Type *RetTy = ICA.getReturnType();
2681 Type *OpTy = RetTy;
2682 Intrinsic::ID IID = ICA.getID();
2683 unsigned ISD = ISD::DELETED_NODE;
2684 switch (IID) {
2685 default:
2686 break;
2687 case Intrinsic::abs:
2688 ISD = ISD::ABS;
2689 break;
2690 case Intrinsic::bitreverse:
2691 ISD = ISD::BITREVERSE;
2692 break;
2693 case Intrinsic::bswap:
2694 ISD = ISD::BSWAP;
2695 break;
2696 case Intrinsic::ctlz:
2697 ISD = ISD::CTLZ;
2698 break;
2699 case Intrinsic::ctpop:
2700 ISD = ISD::CTPOP;
2701 break;
2702 case Intrinsic::cttz:
2703 ISD = ISD::CTTZ;
2704 break;
2705 case Intrinsic::maxnum:
2706 case Intrinsic::minnum:
2707 // FMINNUM has the same costs, so don't duplicate.
2708 ISD = ISD::FMAXNUM;
2709 break;
2710 case Intrinsic::sadd_sat:
2711 ISD = ISD::SADDSAT;
2712 break;
2713 case Intrinsic::smax:
2714 ISD = ISD::SMAX;
2715 break;
2716 case Intrinsic::smin:
2717 ISD = ISD::SMIN;
2718 break;
2719 case Intrinsic::ssub_sat:
2720 ISD = ISD::SSUBSAT;
2721 break;
2722 case Intrinsic::uadd_sat:
2723 ISD = ISD::UADDSAT;
2724 break;
2725 case Intrinsic::umax:
2726 ISD = ISD::UMAX;
2727 break;
2728 case Intrinsic::umin:
2729 ISD = ISD::UMIN;
2730 break;
2731 case Intrinsic::usub_sat:
2732 ISD = ISD::USUBSAT;
2733 break;
2734 case Intrinsic::sqrt:
2735 ISD = ISD::FSQRT;
2736 break;
2737 case Intrinsic::sadd_with_overflow:
2738 case Intrinsic::ssub_with_overflow:
2739 // SSUBO has the same costs, so don't duplicate.
2740 ISD = ISD::SADDO;
2741 OpTy = RetTy->getContainedType(0);
2742 break;
2743 case Intrinsic::uadd_with_overflow:
2744 case Intrinsic::usub_with_overflow:
2745 // USUBO has the same costs, so don't duplicate.
2746 ISD = ISD::UADDO;
2747 OpTy = RetTy->getContainedType(0);
2748 break;
2749 }
2750
2751 if (ISD != ISD::DELETED_NODE) {
2752 // Legalize the type.
2753 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2754 MVT MTy = LT.second;
2755
2756 // Attempt to lookup cost.
2757 if (ST->useGLMDivSqrtCosts())
2758 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2759 return LT.first * Entry->Cost;
2760
2761 if (ST->isSLM())
2762 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2763 return LT.first * Entry->Cost;
2764
2765 if (ST->hasCDI())
2766 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2767 return LT.first * Entry->Cost;
2768
2769 if (ST->hasBWI())
2770 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2771 return LT.first * Entry->Cost;
2772
2773 if (ST->hasAVX512())
2774 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2775 return LT.first * Entry->Cost;
2776
2777 if (ST->hasXOP())
2778 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2779 return LT.first * Entry->Cost;
2780
2781 if (ST->hasAVX2())
2782 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2783 return LT.first * Entry->Cost;
2784
2785 if (ST->hasAVX())
2786 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2787 return LT.first * Entry->Cost;
2788
2789 if (ST->hasSSE42())
2790 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2791 return LT.first * Entry->Cost;
2792
2793 if (ST->hasSSE41())
2794 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2795 return LT.first * Entry->Cost;
2796
2797 if (ST->hasSSSE3())
2798 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2799 return LT.first * Entry->Cost;
2800
2801 if (ST->hasSSE2())
2802 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2803 return LT.first * Entry->Cost;
2804
2805 if (ST->hasSSE1())
2806 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2807 return LT.first * Entry->Cost;
2808
2809 if (ST->hasBMI()) {
2810 if (ST->is64Bit())
2811 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
2812 return LT.first * Entry->Cost;
2813
2814 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
2815 return LT.first * Entry->Cost;
2816 }
2817
2818 if (ST->hasLZCNT()) {
2819 if (ST->is64Bit())
2820 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
2821 return LT.first * Entry->Cost;
2822
2823 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
2824 return LT.first * Entry->Cost;
2825 }
2826
2827 if (ST->hasPOPCNT()) {
2828 if (ST->is64Bit())
2829 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
2830 return LT.first * Entry->Cost;
2831
2832 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
2833 return LT.first * Entry->Cost;
2834 }
2835
2836 // TODO - add BMI (TZCNT) scalar handling
2837
2838 if (ST->is64Bit())
2839 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2840 return LT.first * Entry->Cost;
2841
2842 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2843 return LT.first * Entry->Cost;
2844 }
2845
2846 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2847}
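
The lookup above follows a fixed pattern: legalize the type, then walk the cost tables from the most specific subtarget feature down to the generic x86 baseline, scaling the first matching entry by the legalization split factor LT.first. The following standalone sketch mirrors that pattern; SimpleISD, SimpleMVT, Entry and lookup are simplified illustrative stand-ins for LLVM's CostTblEntry and CostTableLookup, and the costs are placeholders.

#include <cstddef>
#include <cstdio>
#include <optional>

enum SimpleISD { OP_CTPOP, OP_CTLZ };
enum SimpleMVT { TY_i32, TY_i64 };

struct Entry { SimpleISD Op; SimpleMVT Ty; int Cost; };

// Linear scan, mirroring how CostTableLookup walks a table.
template <std::size_t N>
std::optional<int> lookup(const Entry (&Tbl)[N], SimpleISD Op, SimpleMVT Ty) {
  for (const Entry &E : Tbl)
    if (E.Op == Op && E.Ty == Ty)
      return E.Cost;
  return std::nullopt;
}

int main() {
  static const Entry PopcntTbl[] = {{OP_CTPOP, TY_i32, 1}, {OP_CTPOP, TY_i64, 1}};
  static const Entry BaselineTbl[] = {{OP_CTPOP, TY_i32, 8}, {OP_CTPOP, TY_i64, 10}};
  const bool HasPOPCNT = false; // pretend the subtarget lacks POPCNT
  const int SplitFactor = 1;    // plays the role of LT.first

  int Cost = -1;
  // Most specific feature table first, then fall back to the baseline table.
  if (HasPOPCNT)
    if (auto C = lookup(PopcntTbl, OP_CTPOP, TY_i64))
      Cost = SplitFactor * *C;
  if (Cost < 0)
    if (auto C = lookup(BaselineTbl, OP_CTPOP, TY_i64))
      Cost = SplitFactor * *C;
  std::printf("ctpop i64 cost = %d\n", Cost); // prints 10 without POPCNT
}
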
2848
2849int X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2850 TTI::TargetCostKind CostKind) {
2851 if (CostKind != TTI::TCK_RecipThroughput)
2852 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2853
2854 if (ICA.isTypeBasedOnly())
2855 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2856
2857 static const CostTblEntry AVX512CostTbl[] = {
2858 { ISD::ROTL, MVT::v8i64, 1 },
2859 { ISD::ROTL, MVT::v4i64, 1 },
2860 { ISD::ROTL, MVT::v2i64, 1 },
2861 { ISD::ROTL, MVT::v16i32, 1 },
2862 { ISD::ROTL, MVT::v8i32, 1 },
2863 { ISD::ROTL, MVT::v4i32, 1 },
2864 { ISD::ROTR, MVT::v8i64, 1 },
2865 { ISD::ROTR, MVT::v4i64, 1 },
2866 { ISD::ROTR, MVT::v2i64, 1 },
2867 { ISD::ROTR, MVT::v16i32, 1 },
2868 { ISD::ROTR, MVT::v8i32, 1 },
2869 { ISD::ROTR, MVT::v4i32, 1 }
2870 };
2871 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
2872 static const CostTblEntry XOPCostTbl[] = {
2873 { ISD::ROTL, MVT::v4i64, 4 },
2874 { ISD::ROTL, MVT::v8i32, 4 },
2875 { ISD::ROTL, MVT::v16i16, 4 },
2876 { ISD::ROTL, MVT::v32i8, 4 },
2877 { ISD::ROTL, MVT::v2i64, 1 },
2878 { ISD::ROTL, MVT::v4i32, 1 },
2879 { ISD::ROTL, MVT::v8i16, 1 },
2880 { ISD::ROTL, MVT::v16i8, 1 },
2881 { ISD::ROTR, MVT::v4i64, 6 },
2882 { ISD::ROTR, MVT::v8i32, 6 },
2883 { ISD::ROTR, MVT::v16i16, 6 },
2884 { ISD::ROTR, MVT::v32i8, 6 },
2885 { ISD::ROTR, MVT::v2i64, 2 },
2886 { ISD::ROTR, MVT::v4i32, 2 },
2887 { ISD::ROTR, MVT::v8i16, 2 },
2888 { ISD::ROTR, MVT::v16i8, 2 }
2889 };
2890 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2891 { ISD::ROTL, MVT::i64, 1 },
2892 { ISD::ROTR, MVT::i64, 1 },
2893 { ISD::FSHL, MVT::i64, 4 }
2894 };
2895 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2896 { ISD::ROTL, MVT::i32, 1 },
2897 { ISD::ROTL, MVT::i16, 1 },
2898 { ISD::ROTL, MVT::i8, 1 },
2899 { ISD::ROTR, MVT::i32, 1 },
2900 { ISD::ROTR, MVT::i16, 1 },
2901 { ISD::ROTR, MVT::i8, 1 },
2902 { ISD::FSHL, MVT::i32, 4 },
2903 { ISD::FSHL, MVT::i16, 4 },
2904 { ISD::FSHL, MVT::i8, 4 }
2905 };
2906
2907 Intrinsic::ID IID = ICA.getID();
2908 Type *RetTy = ICA.getReturnType();
2909 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
2910 unsigned ISD = ISD::DELETED_NODE;
2911 switch (IID) {
2912 default:
2913 break;
2914 case Intrinsic::fshl:
2915 ISD = ISD::FSHL;
2916 if (Args[0] == Args[1])
2917 ISD = ISD::ROTL;
2918 break;
2919 case Intrinsic::fshr:
2920 // FSHR has the same costs, so don't duplicate.
2921 ISD = ISD::FSHL;
2922 if (Args[0] == Args[1])
2923 ISD = ISD::ROTR;
2924 break;
2925 }
2926
2927 if (ISD != ISD::DELETED_NODE) {
2928 // Legalize the type.
2929 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
2930 MVT MTy = LT.second;
2931
2932 // Attempt to lookup cost.
2933 if (ST->hasAVX512())
2934 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2935 return LT.first * Entry->Cost;
2936
2937 if (ST->hasXOP())
2938 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2939 return LT.first * Entry->Cost;
2940
2941 if (ST->is64Bit())
2942 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2943 return LT.first * Entry->Cost;
2944
2945 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2946 return LT.first * Entry->Cost;
2947 }
2948
2949 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2950}
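
The switch above maps funnel-shift intrinsics to rotates whenever the two data operands are the same value, because fshl(x, x, s) is exactly rotl(x, s) (and likewise for fshr and rotr). A small illustrative sketch of that classification; FunnelShift and classify are hypothetical names.

#include <cstdio>

struct FunnelShift { const void *LoArg; const void *HiArg; bool IsLeft; };

const char *classify(const FunnelShift &F) {
  if (F.LoArg == F.HiArg)               // fshl(x, x, s) == rotl(x, s)
    return F.IsLeft ? "ROTL" : "ROTR";
  return "FSHL";                        // FSHR is costed the same as FSHL
}

int main() {
  int X = 0, Y = 0;
  std::printf("%s\n", classify({&X, &X, true}));  // ROTL
  std::printf("%s\n", classify({&X, &Y, true}));  // FSHL
}
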
2951
2952int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
2953 static const CostTblEntry SLMCostTbl[] = {
2954 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
2955 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
2956 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
2957 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
2958 };
2959
2960 assert(Val->isVectorTy() && "This must be a vector type");
2961 Type *ScalarType = Val->getScalarType();
2962 int RegisterFileMoveCost = 0;
2963
2964 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
2965 Opcode == Instruction::InsertElement)) {
2966 // Legalize the type.
2967 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
2968
2969 // This type is legalized to a scalar type.
2970 if (!LT.second.isVector())
2971 return 0;
2972
2973 // The type may be split. Normalize the index to the new type.
2974 unsigned NumElts = LT.second.getVectorNumElements();
2975 unsigned SubNumElts = NumElts;
2976 Index = Index % NumElts;
2977
2978 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
2979 // For inserts, we also need to insert the subvector back.
2980 if (LT.second.getSizeInBits() > 128) {
2981 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
2982 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
2983 SubNumElts = NumElts / NumSubVecs;
2984 if (SubNumElts <= Index) {
2985 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
2986 Index %= SubNumElts;
2987 }
2988 }
2989
2990 if (Index == 0) {
2991 // Floating point scalars are already located in index #0.
2992 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
2993 // true for all.
2994 if (ScalarType->isFloatingPointTy())
2995 return RegisterFileMoveCost;
2996
2997 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
2998 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
2999 return 1 + RegisterFileMoveCost;
3000 }
3001
3002 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3003 assert(ISD && "Unexpected vector opcode");
3004 MVT MScalarTy = LT.second.getScalarType();
3005 if (ST->isSLM())
3006 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3007 return Entry->Cost + RegisterFileMoveCost;
3008
3009 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3010 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3011 (MScalarTy.isInteger() && ST->hasSSE41()))
3012 return 1 + RegisterFileMoveCost;
3013
3014 // Assume insertps is relatively cheap on all targets.
3015 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3016 Opcode == Instruction::InsertElement)
3017 return 1 + RegisterFileMoveCost;
3018
3019 // For extractions we just need to shuffle the element to index 0, which
3020 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3021 // the element to its destination. In both cases we must handle the
3022 // subvector move(s).
3023 // If the vector type is already less than 128-bits then don't reduce it.
3024 // TODO: Under what circumstances should we shuffle using the full width?
3025 int ShuffleCost = 1;
3026 if (Opcode == Instruction::InsertElement) {
3027 auto *SubTy = cast<VectorType>(Val);
3028 EVT VT = TLI->getValueType(DL, Val);
3029 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3030 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3031 ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, 0, SubTy);
3032 }
3033 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3034 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3035 }
3036
3037 // Add to the base cost if we know that the extracted element of a vector is
3038 // destined to be moved to and used in the integer register file.
3039 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3040 RegisterFileMoveCost += 1;
3041
3042 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3043}
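
The index handling above boils down to: reduce the index modulo the legalized element count, and for vectors wider than 128 bits charge a subvector extract (plus a re-insert for insertions) whenever the element lies outside the low 128-bit subvector, rebasing the index into that subvector. A standalone sketch with illustrative names and values:

#include <cstdio>

int subvectorMoveCost(unsigned VecBits, unsigned NumElts, unsigned &Index,
                      bool IsInsert) {
  Index %= NumElts;                  // the type may already have been split
  if (VecBits <= 128)
    return 0;
  unsigned NumSubVecs = VecBits / 128;
  unsigned SubNumElts = NumElts / NumSubVecs;
  if (Index < SubNumElts)            // element lives in the low 128 bits
    return 0;
  Index %= SubNumElts;               // rebase into its 128-bit subvector
  return IsInsert ? 2 : 1;           // an insert also pays to put the lane back
}

int main() {
  unsigned Idx = 11;                 // element 11 of a 256-bit v16i16 vector
  int Moves = subvectorMoveCost(256, 16, Idx, /*IsInsert=*/true);
  std::printf("register-file moves=%d, rebased index=%u\n", Moves, Idx); // 2, 3
}
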
3044
3045unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3046 const APInt &DemandedElts,
3047 bool Insert, bool Extract) {
3048 unsigned Cost = 0;
3049
3050 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much
3051 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3052 if (Insert) {
3053 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3054 MVT MScalarTy = LT.second.getScalarType();
3055
3056 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3057 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3058 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3059 // For types we can insert directly, insertion into 128-bit subvectors is
3060 // cheap, followed by a cheap chain of concatenations.
3061 if (LT.second.getSizeInBits() <= 128) {
3062 Cost +=
3063 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3064 } else {
3065 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3066 Cost += (PowerOf2Ceil(NumSubVecs) - 1) * LT.first;
3067 Cost += DemandedElts.countPopulation();
3068
3069 // For vXf32 cases, insertion into the 0'th index in each v4f32
3070 // 128-bit vector is free.
3071 // NOTE: This assumes legalization widens vXf32 vectors.
3072 if (MScalarTy == MVT::f32)
3073 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3074 i < e; i += 4)
3075 if (DemandedElts[i])
3076 Cost--;
3077 }
3078 } else if (LT.second.isVector()) {
3079 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3080 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3081 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3082 // considered cheap.
3083 if (Ty->isIntOrIntVectorTy())
3084 Cost += DemandedElts.countPopulation();
3085
3086 // Get the smaller of the legalized or original pow2-extended number of
3087 // vector elements, which represents the number of unpacks we'll end up
3088 // performing.
3089 unsigned NumElts = LT.second.getVectorNumElements();
3090 unsigned Pow2Elts =
3091 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3092 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3093 }
3094 }
3095
3096 // TODO: Use default extraction for now, but we should investigate extending this
3097 // to handle repeated subvector extraction.
3098 if (Extract)
3099 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3100
3101 return Cost;
3102}
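
For the fast-insert branch above on a vector wider than 128 bits, the build cost is roughly one cheap insert per demanded element plus a chain of subvector concatenations, with the lane-0 insert of each v4f32 chunk treated as free. The sketch below assumes all elements are demanded and hand-rolls PowerOf2Ceil; names and numbers are illustrative only.

#include <cstdio>

unsigned powerOf2Ceil(unsigned V) {    // smallest power of two >= V (for V > 0)
  unsigned P = 1;
  while (P < V)
    P *= 2;
  return P;
}

unsigned insertOverhead(unsigned NumElts, unsigned EltBits, bool IsF32,
                        unsigned SplitFactor /* plays the role of LT.first */) {
  unsigned NumSubVecs = (NumElts * EltBits) / 128;
  unsigned Cost = (powerOf2Ceil(NumSubVecs) - 1) * SplitFactor; // concat chain
  Cost += NumElts;                     // one cheap insert per (demanded) element
  if (IsF32)
    Cost -= NumElts / 4;               // lane 0 of each v4f32 subvector is free
  return Cost;
}

int main() {
  // A 512-bit v16f32 built element by element: 3 concats + 16 inserts - 4 free.
  std::printf("%u\n", insertOverhead(16, 32, /*IsF32=*/true, 1)); // 15
}
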
3103
3104int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3105 MaybeAlign Alignment, unsigned AddressSpace,
3106 TTI::TargetCostKind CostKind,
3107 const Instruction *I) {
3108 // TODO: Handle other cost kinds.
3109 if (CostKind != TTI::TCK_RecipThroughput) {
  20. Assuming 'CostKind' is not equal to TCK_RecipThroughput
  21. Taking true branch
3110 if (isa_and_nonnull<StoreInst>(I)) {
  22. Assuming 'I' is a 'StoreInst'
  23. Taking true branch
3111 Value *Ptr = I->getOperand(1);
  24. Called C++ object pointer is null
3112 // Store instruction with index and scale costs 2 Uops.
3113 // Check the preceding GEP to identify non-const indices.
3114 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
3115 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3116 return TTI::TCC_Basic * 2;
3117 }
3118 }
3119 return TTI::TCC_Basic;
3120 }
3121
3122 // Handle non-power-of-two vectors such as <3 x float>
3123 if (auto *VTy = dyn_cast<FixedVectorType>(Src)) {
3124 unsigned NumElem = VTy->getNumElements();
3125
3126 // Handle a few common cases:
3127 // <3 x float>
3128 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
3129 // Cost = 64 bit store + extract + 32 bit store.
3130 return 3;
3131
3132 // <3 x double>
3133 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
3134 // Cost = 128 bit store + unpack + 64 bit store.
3135 return 3;
3136
3137 // Assume that all other non-power-of-two numbers are scalarized.
3138 if (!isPowerOf2_32(NumElem)) {
3139 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3140 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
3141 AddressSpace, CostKind);
3142 int SplitCost = getScalarizationOverhead(VTy, DemandedElts,
3143 Opcode == Instruction::Load,
3144 Opcode == Instruction::Store);
3145 return NumElem * Cost + SplitCost;
3146 }
3147 }
3148
3149 // Type legalization can't handle structs
3150 if (TLI->getValueType(DL, Src, true) == MVT::Other)
3151 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3152 CostKind);
3153
3154 // Legalize the type.
3155 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3156 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3157        "Invalid Opcode");
3158
3159 // Each load/store unit costs 1.
3160 int Cost = LT.first * 1;
3161
3162 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
3163 // proxy for a double-pumped AVX memory interface such as on Sandybridge.
3164 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
3165 Cost *= 2;
3166
3167 return Cost;
3168}
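
The two special paths above reduce to simple arithmetic: a non-power-of-two element count is fully scalarized (one scalar memory op per element plus the scalarization overhead), while a legal access pays one op per legal register, doubled for 32-byte accesses on targets where unaligned 32-byte memory ops are slow. A rough standalone sketch with placeholder component costs:

#include <cstdio>

bool isPowerOf2(unsigned V) { return V != 0 && (V & (V - 1)) == 0; }

int memoryOpCost(unsigned NumElem, int ScalarCost, int SplitCost,
                 unsigned StoreBytes, int SplitFactor, bool SlowUnaligned32) {
  if (!isPowerOf2(NumElem))                   // e.g. a <7 x i32> load/store
    return NumElem * ScalarCost + SplitCost;  // fully scalarized
  int Cost = SplitFactor * 1;                 // one op per legal register
  if (StoreBytes == 32 && SlowUnaligned32)
    Cost *= 2;                                // double-pumped 32-byte access
  return Cost;
}

int main() {
  std::printf("%d\n", memoryOpCost(7, 1, 4, 28, 1, false));  // 7*1 + 4 = 11
  std::printf("%d\n", memoryOpCost(8, 1, 0, 32, 1, true));   // 1 * 2 = 2
}
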
3169
3170int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
3171 Align Alignment, unsigned AddressSpace,
3172 TTI::TargetCostKind CostKind) {
3173 bool IsLoad = (Instruction::Load == Opcode);
3174 bool IsStore = (Instruction::Store == Opcode);
3175
3176 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3177 if (!SrcVTy)
3178 // To calculate scalar take the regular cost, without mask
3179 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3180
3181 unsigned NumElem = SrcVTy->getNumElements();
3182 auto *MaskTy =
3183 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3184 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3185 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) ||
3186 !isPowerOf2_32(NumElem)) {
3187 // Scalarization
3188 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3189 int MaskSplitCost =
3190 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3191 int ScalarCompareCost = getCmpSelInstrCost(
3192 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3193 CostKind);
3194 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3195 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3196 int ValueSplitCost =
3197 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3198 int MemopCost =
3199 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3200 Alignment, AddressSpace, CostKind);
3201 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3202 }
3203
3204 // Legalize the type.
3205 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3206 auto VT = TLI->getValueType(DL, SrcVTy);
3207 int Cost = 0;
3208 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3209 LT.second.getVectorNumElements() == NumElem)
3210 // Promotion requires expand/truncate for data and a shuffle for mask.
3211 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
3212 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);
3213
3214 else if (LT.second.getVectorNumElements() > NumElem) {
3215 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3216 LT.second.getVectorNumElements());
3217 // Expanding requires filling the mask with zeroes
3218 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
3219 }
3220
3221 // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
3222 if (!ST->hasAVX512())
3223 return Cost + LT.first * (IsLoad ? 2 : 8);
3224
3225 // AVX-512 masked load/store is cheaper
3226 return Cost + LT.first;
3227}
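
When the target cannot lower a masked memory op natively, the scalarization fallback above charges each lane a compare and a branch plus a scalar memory op, on top of the cost of splitting the mask and data vectors. A standalone sketch of that formula with placeholder component costs:

#include <cstdio>

int maskedOpScalarizationCost(unsigned NumElem, int ScalarMemCost,
                              int ScalarCmpCost, int BranchCost,
                              int MaskSplitCost, int ValueSplitCost) {
  int MaskCmpCost = NumElem * (BranchCost + ScalarCmpCost); // test each mask lane
  int MemopCost = NumElem * ScalarMemCost;                  // per-lane load/store
  return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
}

int main() {
  // e.g. a <4 x i32> masked store on a target without native maskmov support.
  std::printf("%d\n", maskedOpScalarizationCost(4, 1, 1, 1, 4, 4)); // 4+4+4+8 = 20
}
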
3228
3229int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
3230 const SCEV *Ptr) {
3231 // Address computations in vectorized code with non-consecutive addresses will
3232 // likely result in more instructions compared to scalar code where the
3233 // computation can more often be merged into the index mode. The resulting
3234 // extra micro-ops can significantly decrease throughput.
3235 const unsigned NumVectorInstToHideOverhead = 10;
3236
3237 // Cost modeling of Strided Access Computation is hidden by the indexing
3238 // modes of X86 regardless of the stride value. We don't believe that there
3239 // is a difference between constant strided access in general and a constant
3240 // stride value which is less than or equal to 64.
3241 // Even in the case of (loop invariant) stride whose value is not known at
3242 // compile time, the address computation will not incur more than one extra
3243 // ADD instruction.
3244 if (Ty->isVectorTy() && SE) {
3245 if (!BaseT::isStridedAccess(Ptr))
3246 return NumVectorInstToHideOverhead;
3247 if (!BaseT::getConstantStrideStep(SE, Ptr))
3248 return 1;
3249 }
3250
3251 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3252}
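
The decision above has three outcomes: non-strided vector addresses are assumed to cost about ten extra micro-ops, a strided access whose loop-invariant stride is not a compile-time constant costs one extra ADD, and a constant stride folds into the addressing mode and falls through to the generic cost. An illustrative sketch, with a hypothetical Stride enum standing in for the SCEV queries:

#include <cstdio>

enum class Stride { NonStrided, ConstantStride, UnknownLoopInvariantStride };

int addressComputationCost(bool IsVector, Stride S, int BaseCost) {
  const int NumVectorInstToHideOverhead = 10;
  if (IsVector) {
    if (S == Stride::NonStrided)
      return NumVectorInstToHideOverhead;   // scattered addresses are costly
    if (S == Stride::UnknownLoopInvariantStride)
      return 1;                             // at most one extra ADD
  }
  return BaseCost;                          // constant strides fold into addressing modes
}

int main() {
  std::printf("%d\n", addressComputationCost(true, Stride::NonStrided, 0));                 // 10
  std::printf("%d\n", addressComputationCost(true, Stride::UnknownLoopInvariantStride, 0)); // 1
  std::printf("%d\n", addressComputationCost(true, Stride::ConstantStride, 0));             // 0 (base)
}
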
3253
3254int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3255 bool IsPairwise,
3256 TTI::TargetCostKind CostKind) {
3257 // Just use the default implementation for pair reductions.
3258 if (IsPairwise)
3259 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind);
3260
3261 // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
3262 // and use that measurement as the cost.
3263
3264 static const CostTblEntry SLMCostTblNoPairWise[] = {
3265 { ISD::FADD, MVT::v2f64, 3 },
3266 { ISD::ADD, MVT::v2i64, 5 },
3267 };
3268
3269 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3270 { ISD::FADD, MVT::v2f64, 2 },
3271 { ISD::FADD, MVT::v4f32, 4 },
3272 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
3273 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
3274 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
3275 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
3276 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
3277 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
3278 { ISD::ADD, MVT::v2i8, 2 },
3279 { ISD::ADD, MVT::v4i8, 2 },
3280 { ISD::ADD, MVT::v8i8, 2 },
3281 { ISD::ADD, MVT::v16i8, 3 },
3282 };
3283
3284 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3285 { ISD::FADD, MVT::v4f64, 3 },
3286 { ISD::FADD, MVT::v4f32, 3 },
3287 { ISD::FADD, MVT::v8f32, 4 },
3288 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
3289 { ISD::ADD, MVT::v4i64, 3 },
3290 { ISD::ADD, MVT::v8i32, 5 },
3291 { ISD::ADD, MVT::v16i16, 5 },
3292 { ISD::ADD, MVT::v32i8, 4 },
3293 };
3294
3295 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3296 assert(ISD && "Invalid opcode");
3297
3298 // Before legalizing the type, give a chance to look up illegal narrow types
3299 // in the table.
3300 // FIXME: Is there a better way to do this?
3301 EVT VT = TLI->getValueType(DL, ValTy);
3302 if (VT.isSimple()) {
3303 MVT MTy = VT.getSimpleVT();
3304 if (ST->isSLM())
3305 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3306 return Entry->Cost;
3307
3308 if (ST->hasAVX())
3309 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3310 return Entry->Cost;
3311
3312 if (ST->hasSSE2())
3313 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3314 return Entry->Cost;
3315 }
3316
3317 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3318
3319 MVT MTy = LT.second;
3320
3321 auto *ValVTy = cast<FixedVectorType>(ValTy);
3322
3323 unsigned ArithmeticCost = 0;
3324 if (LT.first != 1 && MTy.isVector() &&
3325 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3326 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3327 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3328 MTy.getVectorNumElements());
3329 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3330 ArithmeticCost *= LT.first - 1;
3331 }
3332
3333 if (ST->isSLM())
3334 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3335 return ArithmeticCost + Entry->Cost;
3336
3337 if (ST->hasAVX())
3338 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3339 return ArithmeticCost + Entry->Cost;
3340
3341 if (ST->hasSSE2())
3342 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3343 return ArithmeticCost + Entry->Cost;
3344
3345 // FIXME: These assume a naive kshift+binop lowering, which is probably
3346 // conservative in most cases.
3347 static const CostTblEntry AVX512BoolReduction[] = {
3348 { ISD::AND, MVT::v2i1, 3 },
3349 { ISD::AND, MVT::v4i1, 5 },
3350 { ISD::AND, MVT::v8i1, 7 },
3351 { ISD::AND, MVT::v16i1, 9 },
3352 { ISD::AND, MVT::v32i1, 11 },
3353 { ISD::AND, MVT::v64i1, 13 },
3354 { ISD::OR, MVT::v2i1, 3 },
3355 { ISD::OR, MVT::v4i1, 5 },
3356 { ISD::OR, MVT::v8i1, 7 },
3357 { ISD::OR, MVT::v16i1, 9 },
3358 { ISD::OR, MVT::v32i1, 11 },
3359 { ISD::OR, MVT::v64i1, 13 },
3360 };
3361
3362 static const CostTblEntry AVX2BoolReduction[] = {
3363 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
3364 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
3365 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
3366 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
3367 };
3368
3369 static const CostTblEntry AVX1BoolReduction[] = {
3370 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
3371 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
3372 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3373 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3374 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
3375 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
3376 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3377 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3378 };
3379
3380 static const CostTblEntry SSE2BoolReduction[] = {
3381 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
3382 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
3383 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
3384 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
3385 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
3386 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
3387 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
3388 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
3389 };
3390
3391 // Handle bool allof/anyof patterns.
3392 if (ValVTy->getElementType()->isIntegerTy(1)) {
3393 unsigned ArithmeticCost = 0;
3394 if (LT.first != 1 && MTy.isVector() &&
3395 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3396 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3397 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3398 MTy.getVectorNumElements());
3399 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3400 ArithmeticCost *= LT.first - 1;
3401 }
3402
3403 if (ST->hasAVX512())
3404 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3405 return ArithmeticCost + Entry->Cost;
3406 if (ST->hasAVX2())
3407 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3408 return ArithmeticCost + Entry->Cost;
3409 if (ST->hasAVX())
3410 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3411 return ArithmeticCost + Entry->Cost;
3412 if (ST->hasSSE2())
3413 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3414 return ArithmeticCost + Entry->Cost;
3415
3416 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3417 CostKind);
3418 }
3419
3420 unsigned NumVecElts = ValVTy->getNumElements();
3421 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3422
3423 // Special case power of 2 reductions where the scalar type isn't changed
3424 // by type legalization.
3425 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3426 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3427 CostKind);
3428
3429 unsigned ReductionCost = 0;
3430
3431 auto *Ty = ValVTy;
3432 if (LT.first != 1 && MTy.isVector() &&
3433 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3434 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3435 Ty = FixedVectorType::get(ValVTy->getElementType(),
3436 MTy.getVectorNumElements());
3437 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
3438 ReductionCost *= LT.first - 1;
3439 NumVecElts = MTy.getVectorNumElements();
3440 }
3441
3442 // Now handle reduction with the legal type, taking into account size changes
3443 // at each level.
3444 while (NumVecElts > 1) {
3445 // Determine the size of the remaining vector we need to reduce.
3446 unsigned Size = NumVecElts * ScalarSize;
3447 NumVecElts /= 2;
3448 // If we're reducing from 256/512 bits, use an extract_subvector.
3449 if (Size > 128) {
3450 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3451 ReductionCost +=
3452 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3453 Ty = SubTy;
3454 } else if (Size == 128) {
3455 // Reducing from 128 bits is a permute of v2f64/v2i64.
3456 FixedVectorType *ShufTy;
3457 if (ValVTy->isFloatingPointTy())
3458 ShufTy =
3459 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
3460 else
3461 ShufTy =
3462 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
3463 ReductionCost +=
3464 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3465 } else if (Size == 64) {
3466 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3467 FixedVectorType *ShufTy;
3468 if (ValVTy->isFloatingPointTy())
3469 ShufTy =
3470 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
3471 else
3472 ShufTy =
3473 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
3474 ReductionCost +=
3475 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3476 } else {
3477 // Reducing from smaller size is a shift by immediate.
3478 auto *ShiftTy = FixedVectorType::get(
3479 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
3480 ReductionCost += getArithmeticInstrCost(
3481 Instruction::LShr, ShiftTy, CostKind,
3482 TargetTransformInfo::OK_AnyValue,
3483 TargetTransformInfo::OK_UniformConstantValue,
3484 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3485 }
3486
3487 // Add the arithmetic op for this level.
3488 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
3489 }
3490
3491 // Add the final extract element to the cost.
3492 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3493}
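
The reduction loop above halves the vector at every level, paying a shuffle (an extract_subvector above 128 bits, a permute or an immediate shift at or below 128 bits) plus one arithmetic op per level, and finally extracts lane 0. A simplified standalone sketch that charges the same placeholder cost for every per-level shuffle:

#include <cstdio>

int reductionTreeCost(unsigned NumVecElts, int ShuffleCost, int ArithCost,
                      int ExtractCost) {
  int Cost = 0;
  while (NumVecElts > 1) {
    NumVecElts /= 2;         // each level halves the live part of the vector
    Cost += ShuffleCost;     // bring the upper half next to the lower half
    Cost += ArithCost;       // combine the two halves
  }
  return Cost + ExtractCost; // read lane 0 as the scalar result
}

int main() {
  // e.g. reducing a v8i32: three shuffle+add levels, then one extract.
  std::printf("%d\n", reductionTreeCost(8, 1, 1, 1)); // 3*(1+1) + 1 = 7
}
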
3494
3495int X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned) {
3496 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3497
3498 MVT MTy = LT.second;
3499
3500 int ISD;
3501 if (Ty->isIntOrIntVectorTy()) {
3502 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3503 } else {
3504 assert(Ty->isFPOrFPVectorTy() &&
3505        "Expected float point or integer vector type.");
3506 ISD = ISD::FMINNUM;
3507 }
3508
3509 static const CostTblEntry SSE1CostTbl[] = {
3510 {ISD::FMINNUM, MVT::v4f32, 1},
3511 };
3512
3513 static const CostTblEntry SSE2CostTbl[] = {
3514 {ISD::FMINNUM, MVT::v2f64, 1},
3515 {ISD::SMIN, MVT::v8i16, 1},
3516 {ISD::UMIN, MVT::v16i8, 1},
3517 };
3518
3519 static const CostTblEntry SSE41CostTbl[] = {
3520 {ISD::SMIN, MVT::v4i32, 1},
3521 {ISD::UMIN, MVT::v4i32, 1},
3522 {ISD::UMIN, MVT::v8i16, 1},
3523 {ISD::SMIN, MVT::v16i8, 1},
3524 };
3525
3526 static const CostTblEntry SSE42CostTbl[] = {
3527 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
3528 };
3529
3530 static const CostTblEntry AVX1CostTbl[] = {
3531 {ISD::FMINNUM, MVT::v8f32, 1},
3532 {ISD::FMINNUM, MVT::v4f64, 1},
3533 {ISD::SMIN, MVT::v8i32, 3},
3534 {ISD::UMIN, MVT::v8i32, 3},
3535 {ISD::SMIN, MVT::v16i16, 3},
3536 {ISD::UMIN, MVT::v16i16, 3},
3537 {ISD::SMIN, MVT::v32i8, 3},
3538 {ISD::UMIN, MVT::v32i8, 3},
3539 };
3540
3541 static const CostTblEntry AVX2CostTbl[] = {
3542 {ISD::SMIN, MVT::v8i32, 1},
3543 {ISD::UMIN, MVT::v8i32, 1},
3544 {ISD::SMIN, MVT::v16i16, 1},
3545 {ISD::UMIN, MVT::v16i16, 1},
3546 {ISD::SMIN, MVT::v32i8, 1},
3547 {ISD::UMIN, MVT::v32i8, 1},
3548 };
3549
3550 static const CostTblEntry AVX512CostTbl[] = {
3551 {ISD::FMINNUM, MVT::v16f32, 1},
3552 {ISD::FMINNUM, MVT::v8f64, 1},
3553 {ISD::SMIN, MVT::v2i64, 1},
3554 {ISD::UMIN, MVT::v2i64, 1},
3555 {ISD::SMIN, MVT::v4i64, 1},
3556 {ISD::UMIN, MVT::v4i64, 1},
3557 {ISD::SMIN, MVT::v8i64, 1},
3558 {ISD::UMIN, MVT::v8i64, 1},
3559 {ISD::SMIN, MVT::v16i32, 1},
3560 {ISD::UMIN, MVT::v16i32, 1},
3561 };
3562
3563 static const CostTblEntry AVX512BWCostTbl[] = {
3564 {ISD::SMIN, MVT::v32i16, 1},
3565 {ISD::UMIN, MVT::v32i16, 1},
3566 {ISD::SMIN, MVT::v64i8, 1},
3567 {ISD::UMIN, MVT::v64i8, 1},
3568 };
3569
3570 // If we have a native MIN/MAX instruction for this type, use it.
3571 if (ST->hasBWI())
3572 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3573 return LT.first * Entry->Cost;
3574
3575 if (ST->hasAVX512())
3576 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3577 return LT.first * Entry->Cost;
3578
3579 if (ST->hasAVX2())
3580 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3581 return LT.first * Entry->Cost;
3582
3583 if (ST->hasAVX())
3584 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3585 return LT.first * Entry->Cost;
3586
3587 if (ST->hasSSE42())
3588 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3589 return LT.first * Entry->Cost;
3590
3591 if (ST->hasSSE41())
3592 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3593 return LT.first * Entry->Cost;
3594
3595 if (ST->hasSSE2())
3596 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3597 return LT.first * Entry->Cost;
3598
3599 if (ST->hasSSE1())
3600 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3601 return LT.first * Entry->Cost;
3602
3603 unsigned CmpOpcode;
3604 if (Ty->isFPOrFPVectorTy()) {
3605 CmpOpcode = Instruction::FCmp;
3606 } else {
3607 assert(Ty->isIntOrIntVectorTy() &&
3608        "expecting floating point or integer type for min/max reduction");
3609 CmpOpcode = Instruction::ICmp;
3610 }
3611
3612 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3613 // Otherwise fall back to cmp+select.
3614 return getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CostKind) +
3615 getCmpSelInstrCost(Instruction::Select, Ty, CondTy, CostKind);
3616}
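
The fallback at the end of getMinMaxCost models a min/max without native support as a compare feeding a select, i.e. two operations. A tiny scalar illustration of that lowering:

#include <cstdio>

int sminViaCmpSel(int A, int B) {
  bool Cmp = A < B;    // first op: the compare (pcmpgt / cmpps style)
  return Cmp ? A : B;  // second op: the select (blend / andn+or style)
}

int main() {
  std::printf("%d\n", sminViaCmpSel(3, 7));  // 3
  std::printf("%d\n", sminViaCmpSel(9, -1)); // -1
}
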
3617
3618int X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
3619 bool IsPairwise, bool IsUnsigned,
3620 TTI::TargetCostKind CostKind) {
3621 // Just use the default implementation for pair reductions.
3622 if (IsPairwise)
3623 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3624 CostKind);
3625
3626 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3627
3628 MVT MTy = LT.second;
3629
3630 int ISD;
3631 if (ValTy->isIntOrIntVectorTy()) {
3632 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3633 } else {
3634 assert(ValTy->isFPOrFPVectorTy() &&
3635        "Expected float point or integer vector type.");
3636 ISD = ISD::FMINNUM;
3637 }
3638
3639 // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
3640 // and use that measurement as the cost.
3641
3642 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3643 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
3644 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
3645 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
3646 };
3647
3648 static const CostTblEntry SSE41CostTblNoPairWise[] = {
3649 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
3650 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
3651 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
3652 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
3653 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
3654 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
3655 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
3656 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
3657 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
3658 {ISD::SMIN, MVT::v16i8, 6},
3659 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
3660 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
3661 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
3662 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
3663 };
3664
3665 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3666 {ISD::SMIN, MVT::v16i16, 6},
3667 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
3668 {ISD::SMIN, MVT::v32i8, 8},
3669 {ISD::UMIN, MVT::v32i8, 8},
3670 };
3671
3672 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
3673 {ISD::SMIN, MVT::v32i16, 8},
3674 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
3675 {ISD::SMIN, MVT::v64i8, 10},
3676 {ISD::UMIN, MVT::v64i8, 10},
3677 };
3678
3679 // Before legalizing the type, give a chance to look up illegal narrow types
3680 // in the table.
3681 // FIXME: Is there a better way to do this?
3682 EVT VT = TLI->getValueType(DL, ValTy);
3683 if (VT.isSimple()) {
3684 MVT MTy = VT.getSimpleVT();
3685 if (ST->hasBWI())
3686 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3687 return Entry->Cost;
3688
3689 if (ST->hasAVX())
3690 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3691 return Entry->Cost;
3692
3693 if (ST->hasSSE41())
3694 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3695 return Entry->Cost;
3696
3697 if (ST->hasSSE2())
3698 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3699 return Entry->Cost;
3700 }
3701
3702 auto *ValVTy = cast<FixedVectorType>(ValTy);
3703 unsigned NumVecElts = ValVTy->getNumElements();
3704
3705 auto *Ty = ValVTy;
3706 unsigned MinMaxCost = 0;
3707 if (LT.first != 1 && MTy.isVector() &&
3708 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3709 // Type needs to be split. We need LT.first - 1 operations.
3710 Ty = FixedVectorType::get(ValVTy->getElementType(),
3711 MTy.getVectorNumElements());
3712 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
3713 MTy.getVectorNumElements());
3714 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3715 MinMaxCost *= LT.first - 1;
3716 NumVecElts = MTy.getVectorNumElements();
3717 }
3718
3719 if (ST->hasBWI())
3720 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3721 return MinMaxCost + Entry->Cost;
3722
3723 if (ST->hasAVX())
3724 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3725 return MinMaxCost + Entry->Cost;
3726
3727 if (ST->hasSSE41())
3728 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3729 return MinMaxCost + Entry->Cost;
3730
3731 if (ST->hasSSE2())
3732 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3733 return MinMaxCost + Entry->Cost;
3734
3735 unsigned ScalarSize = ValTy->getScalarSizeInBits();
3736
3737 // Special case power of 2 reductions where the scalar type isn't changed
3738 // by type legalization.
3739 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
3740 ScalarSize != MTy.getScalarSizeInBits())
3741 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3742 CostKind);
3743
3744 // Now handle reduction with the legal type, taking into account size changes
3745 // at each level.
3746 while (NumVecElts > 1) {
3747 // Determine the size of the remaining vector we need to reduce.
3748 unsigned Size = NumVecElts * ScalarSize;
3749 NumVecElts /= 2;
3750 // If we're reducing from 256/512 bits, use an extract_subvector.
3751 if (Size > 128) {
3752 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3753 MinMaxCost +=
3754 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3755 Ty = SubTy;
3756 } else if (Size == 128) {
3757 // Reducing from 128 bits is a permute of v2f64/v2i64.
3758 VectorType *ShufTy;
3759 if (ValTy->isFloatingPointTy())
3760 ShufTy =
3761 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
3762 else
3763 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
3764 MinMaxCost +=
3765 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3766 } else if (Size == 64) {
3767 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3768 FixedVectorType *ShufTy;
3769 if (ValTy->isFloatingPointTy())
3770 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
3771 else
3772 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
3773 MinMaxCost +=
3774 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3775 } else {
3776 // Reducing from smaller size is a shift by immediate.
3777 auto *ShiftTy = FixedVectorType::get(
3778 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
3779 MinMaxCost += getArithmeticInstrCost(
3780 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
3781 TargetTransformInfo::OK_AnyValue,
3782 TargetTransformInfo::OK_UniformConstantValue,
3783 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3784 }
3785
3786 // Add the arithmetic op for this level.
3787 auto *SubCondTy =
3788 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
3789 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3790 }
3791
3792 // Add the final extract element to the cost.
3793 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3794}
3795
3796/// Calculate the cost of materializing a 64-bit value. This helper
3797/// method might only calculate a fraction of a larger immediate. Therefore it
3798/// is valid to return a cost of ZERO.
3799int X86TTIImpl::getIntImmCost(int64_t Val) {
3800 if (Val == 0)
3801 return TTI::TCC_Free;
3802
3803 if (isInt<32>(Val))
3804 return TTI::TCC_Basic;
3805
3806 return 2 * TTI::TCC_Basic;
3807}
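
The materialization cost above distinguishes three cases: zero is free, values representable as a sign-extended 32-bit immediate cost one basic instruction, and anything else costs two. A standalone sketch with placeholder TCC-style constants:

#include <cstdint>
#include <cstdio>
#include <limits>

int immCost(int64_t Val) {
  const int TCC_Free = 0, TCC_Basic = 1;           // placeholder TCC values
  if (Val == 0)
    return TCC_Free;
  if (Val >= std::numeric_limits<int32_t>::min() &&
      Val <= std::numeric_limits<int32_t>::max())  // i.e. isInt<32>(Val)
    return TCC_Basic;
  return 2 * TCC_Basic;                            // needs a full 64-bit move
}

int main() {
  std::printf("%d %d %d\n", immCost(0), immCost(-42), immCost(1LL << 40)); // 0 1 2
}
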
3808
3809int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
3810 TTI::TargetCostKind CostKind) {
3811 assert(Ty->isIntegerTy());
3812
3813 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3814 if (BitSize == 0)
3815 return ~0U;
3816
3817 // Never hoist constants larger than 128 bits, because this might lead to
3818 // incorrect code generation or assertions in codegen.
3819 // FIXME: Create a cost model for types larger than i128 once the codegen
3820 // issues have been fixed.
3821 if (BitSize > 128)
3822 return TTI::TCC_Free;
3823
3824 if (Imm == 0)
3825 return TTI::TCC_Free;
3826
3827 // Sign-extend all constants to a multiple of 64-bit.
3828 APInt ImmVal = Imm;
3829 if (BitSize % 64 != 0)
3830 ImmVal = Imm.sext(alignTo(BitSize, 64));
3831
3832 // Split the constant into 64-bit chunks and calculate the cost for each
3833 // chunk.
3834 int Cost = 0;
3835 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
3836 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
3837 int64_t Val = Tmp.getSExtValue();
3838 Cost += getIntImmCost(Val);
3839 }
3840 // We need at least one instruction to materialize the constant.
3841 return std::max(1, Cost);
3842}
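
The chunking loop above sign-extends the constant to a multiple of 64 bits, costs each 64-bit chunk independently, and charges at least one instruction overall. The sketch below models a 128-bit immediate as two explicit 64-bit halves rather than an APInt, a simplification of the sext/ashr walk in the real code:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int chunkCost(int64_t Chunk) {             // same shape as the 64-bit cost above
  if (Chunk == 0)
    return 0;                              // TCC_Free
  if (Chunk >= INT32_MIN && Chunk <= INT32_MAX)
    return 1;                              // TCC_Basic
  return 2;
}

int wideImmCost(int64_t Lo, int64_t Hi) {  // a 128-bit value as two 64-bit chunks
  if (Lo == 0 && Hi == 0)
    return 0;                              // a zero immediate is free
  int Cost = chunkCost(Lo) + chunkCost(Hi);
  return std::max(1, Cost);                // materializing always needs one inst
}

int main() {
  std::printf("%d\n", wideImmCost(0, 0));         // 0 (free)
  std::printf("%d\n", wideImmCost(123, 0));       // 1
  std::printf("%d\n", wideImmCost(1LL << 40, 5)); // 2 + 1 = 3
}
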
3843
3844int X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
3845 Type *Ty, TTI::TargetCostKind CostKind) {
3846 assert(Ty->isIntegerTy());
3847
3848 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3849 // There is no cost model for constants with a bit size of 0. Return TCC_Free
3850 // here, so that constant hoisting will ignore this constant.
3851 if (BitSize == 0)
3852 return TTI::TCC_Free;
3853
3854 unsigned ImmIdx = ~0U;
3855 switch (Opcode) {
3856 default:
3857 return TTI::TCC_Free;
3858 case Instruction::GetElementPtr:
3859 // Always hoist the base address of a GetElementPtr. This prevents the
3860 // creation of new constants for every base constant that gets constant
3861 // folded with the offset.
3862 if (Idx == 0)
3863 return 2 * TTI::TCC_Basic;
3864 return TTI::TCC_Free;
3865 case Instruction::Store:
3866 ImmIdx = 0;
3867 break;
3868 case Instruction::ICmp:
3869 // This is an imperfect hack to prevent constant hoisting of
3870 // compares that might be trying to check if a 64-bit value fits in
3871 // 32-bits. The backend can optimize these cases using a right shift by 32.
3872 // Ideally we would check the compare predicate here. There also other
3873 // similar immediates the backend can use shifts for.
3874 if (Idx == 1 && Imm.getBitWidth() == 64) {
3875 uint64_t ImmVal = Imm.getZExtValue();
3876 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
3877 return TTI::TCC_Free;
3878 }
3879 ImmIdx = 1;
3880 break;
3881 case Instruction::And:
3882 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
3883 // by using a 32-bit operation with implicit zero extension. Detect such
3884 // immediates here as the normal path expects bit 31 to be sign extended.
3885 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
3886 return TTI::TCC_Free;
3887 ImmIdx = 1;
3888 break;
3889 case Instruction::Add:
3890 case Instruction::Sub:
3891 // For add/sub, we can use the opposite instruction for INT32_MIN.
3892 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
3893 return TTI::TCC_Free;
3894 ImmIdx = 1;
3895 break;
3896 case Instruction::UDiv:
3897 case Instruction::SDiv:
3898 case Instruction::URem:
3899 case Instruction::SRem:
3900 // Division by constant is typically expanded later into a different
3901 // instruction sequence. This completely changes the constants.
3902 // Report them as "free" to stop ConstantHoist from marking them as opaque.
3903 return TTI::TCC_Free;
3904 case Instruction::Mul:
3905 case Instruction::Or:
3906 case Instruction::Xor:
3907 ImmIdx = 1;
3908 break;
3909 // Always return TCC_Free for the shift value of a shift instruction.
3910 case Instruction::Shl:
3911 case Instruction::LShr:
3912 case Instruction::AShr:
3913 if (Idx == 1)
3914 return TTI::TCC_Free;
3915 break;
3916 case Instruction::Trunc:
3917 case Instruction::ZExt:
3918 case Instruction::SExt:
3919 case Instruction::IntToPtr:
3920 case Instruction::PtrToInt:
3921 case Instruction::BitCast:
3922 case Instruction::PHI:
3923 case Instruction::Call:
3924 case Instruction::Select:
3925 case Instruction::Ret:
3926 case Instruction::Load:
3927 break;
3928 }
3929
3930 if (Idx == ImmIdx) {
3931 int NumConstants = divideCeil(BitSize, 64);
3932 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
3933 return (Cost <= NumConstants * TTI::TCC_Basic)
3934 ? static_cast<int>(TTI::TCC_Free)
3935 : Cost;
3936 }
3937
3938 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
3939}
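
A minimal sketch of the final decision above, assuming TCC_Free == 0 and TCC_Basic == 1; the helper name is hypothetical.

// Illustrative sketch only: hoist the immediate only when rematerializing
// it would cost more than NumConstants "basic" instructions.
static int hoistDecision(int MaterializeCost, unsigned BitSize) {
  const int TCC_Free = 0, TCC_Basic = 1;       // assumed TTI cost constants
  int NumConstants = (BitSize + 63) / 64;      // divideCeil(BitSize, 64)
  return MaterializeCost <= NumConstants * TCC_Basic ? TCC_Free
                                                     : MaterializeCost;
}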
3940
3941int X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
3942 const APInt &Imm, Type *Ty,
3943 TTI::TargetCostKind CostKind) {
3944 assert(Ty->isIntegerTy());
3945
3946 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3947 // There is no cost model for constants with a bit size of 0. Return TCC_Free
3948 // here, so that constant hoisting will ignore this constant.
3949 if (BitSize == 0)
3950 return TTI::TCC_Free;
3951
3952 switch (IID) {
3953 default:
3954 return TTI::TCC_Free;
3955 case Intrinsic::sadd_with_overflow:
3956 case Intrinsic::uadd_with_overflow:
3957 case Intrinsic::ssub_with_overflow:
3958 case Intrinsic::usub_with_overflow:
3959 case Intrinsic::smul_with_overflow:
3960 case Intrinsic::umul_with_overflow:
3961 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
3962 return TTI::TCC_Free;
3963 break;
3964 case Intrinsic::experimental_stackmap:
3965 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
3966 return TTI::TCC_Free;
3967 break;
3968 case Intrinsic::experimental_patchpoint_void:
3969 case Intrinsic::experimental_patchpoint_i64:
3970 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
3971 return TTI::TCC_Free;
3972 break;
3973 }
3974 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
3975}
3976
3977unsigned
3978X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
3979 if (CostKind != TTI::TCK_RecipThroughput)
3980 return Opcode == Instruction::PHI ? 0 : 1;
3981 // Branches are assumed to be predicted.
3982 return CostKind == TTI::TCK_RecipThroughput ? 0 : 1;
3983}
3984
3985int X86TTIImpl::getGatherOverhead() const {
3986 // Some CPUs have more overhead for gather. The specified overhead is relative
3987 // to the Load operation. "2" is the number provided by Intel architects. This
3988 // parameter is used for cost estimation of Gather Op and comparison with
3989 // other alternatives.
3990 // TODO: Remove the explicit hasAVX512()? That would mean we would only
3991 // enable gather with a -march.
3992 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
3993 return 2;
3994
3995 return 1024;
3996}
3997
3998int X86TTIImpl::getScatterOverhead() const {
3999 if (ST->hasAVX512())
4000 return 2;
4001
4002 return 1024;
4003}
4004
4005// Return an average cost of Gather / Scatter instruction, maybe improved later
4006int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, const Value *Ptr,
4007 Align Alignment, unsigned AddressSpace) {
4008
4009 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4010 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4011
4012 // Try to reduce index size from 64 bit (default for GEP)
4013 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4014 // operation will use 16 x 64 indices, which do not fit in a zmm and need
4015 // to be split. Also check that the base pointer is the same for all lanes,
4016 // and that there's at most one variable index.
4017 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4018 unsigned IndexSize = DL.getPointerSizeInBits();
4019 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4020 if (IndexSize < 64 || !GEP)
4021 return IndexSize;
4022
4023 unsigned NumOfVarIndices = 0;
4024 const Value *Ptrs = GEP->getPointerOperand();
4025 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4026 return IndexSize;
4027 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4028 if (isa<Constant>(GEP->getOperand(i)))
4029 continue;
4030 Type *IndxTy = GEP->getOperand(i)->getType();
4031 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4032 IndxTy = IndexVTy->getElementType();
4033 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4034 !isa<SExtInst>(GEP->getOperand(i))) ||
4035 ++NumOfVarIndices > 1)
4036 return IndexSize; // 64
4037 }
4038 return (unsigned)32;
4039 };
4040
4041 // Trying to reduce IndexSize to 32 bits for a 16-element vector.
4042 // By default the IndexSize is equal to pointer size.
4043 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4044 ? getIndexSizeInBits(Ptr, DL)
4045 : DL.getPointerSizeInBits();
4046
4047 auto *IndexVTy = FixedVectorType::get(
4048 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4049 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
4050 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
4051 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
4052 if (SplitFactor > 1) {
4053 // Handle splitting of vector of pointers
4054 auto *SplitSrcTy =
4055 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4056 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4057 AddressSpace);
4058 }
4059
4060 // The gather / scatter cost is given by Intel architects. It is a rough
4061 // number since we are looking at one instruction at a time.
4062 const int GSOverhead = (Opcode == Instruction::Load)
4063 ? getGatherOverhead()
4064 : getScatterOverhead();
4065 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4066 MaybeAlign(Alignment), AddressSpace,
4067 TTI::TCK_RecipThroughput);
4068}
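
The formula above can be summarized with a small standalone sketch; all parameters are hypothetical stand-ins for the TTI and legalization queries.

// Illustrative sketch only: gather/scatter overhead plus one scalar memory
// op per lane, multiplied out when legalization splits the vector.
static int gsVectorCostSketch(int GSOverhead, int ScalarMemOpCost,
                              unsigned VF, unsigned SplitFactor) {
  if (SplitFactor > 1)                         // handle type-legalization splits
    return SplitFactor * gsVectorCostSketch(GSOverhead, ScalarMemOpCost,
                                            VF / SplitFactor, 1);
  return GSOverhead + static_cast<int>(VF) * ScalarMemOpCost;
}
// e.g. VF = 16 split in two halves: 2 * (GSOverhead + 8 * ScalarMemOpCost).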
4069
4070/// Return the cost of full scalarization of gather / scatter operation.
4071///
4072/// Opcode - Load or Store instruction.
4073/// SrcVTy - The type of the data vector that should be gathered or scattered.
4074/// VariableMask - The mask is non-constant at compile time.
4075/// Alignment - Alignment for one element.
4076/// AddressSpace - pointer[s] address space.
4077///
4078int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4079 bool VariableMask, Align Alignment,
4080 unsigned AddressSpace) {
4081 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4082 APInt DemandedElts = APInt::getAllOnesValue(VF);
4083 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4084
4085 int MaskUnpackCost = 0;
4086 if (VariableMask) {
4087 auto *MaskTy =
4088 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4089 MaskUnpackCost =
4090 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4091 int ScalarCompareCost =
4092 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
4093 nullptr, CostKind);
4094 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4095 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4096 }
4097
4098 // The cost of the scalar loads/stores.
4099 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4100 MaybeAlign(Alignment), AddressSpace,
4101 CostKind);
4102
4103 int InsertExtractCost = 0;
4104 if (Opcode == Instruction::Load)
4105 for (unsigned i = 0; i < VF; ++i)
4106 // Add the cost of inserting each scalar load into the vector
4107 InsertExtractCost +=
4108 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4109 else
4110 for (unsigned i = 0; i < VF; ++i)
4111 // Add the cost of extracting each element out of the data vector
4112 InsertExtractCost +=
4113 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4114
4115 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4116}
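
For intuition, here is a worked example of the three terms summed above for a hypothetical VF = 4 gather with a variable mask; every per-operation cost is a made-up placeholder, not a measured value.

// Illustrative sketch only: MemoryOpCost + MaskUnpackCost + InsertExtractCost.
static int gsScalarCostExample() {
  const unsigned VF = 4;
  int MaskExtractCost   = 4;        // assumed scalarization overhead for the i1 mask
  int MaskUnpackCost    = MaskExtractCost + VF * (1 /*icmp*/ + 1 /*branch*/);
  int MemoryOpCost      = VF * 1;   // VF scalar loads, assumed cost 1 each
  int InsertExtractCost = VF * 1;   // insert each loaded scalar into the result
  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;     // = 20
}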
4117
4118/// Calculate the cost of Gather / Scatter operation
4119int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
4120 const Value *Ptr, bool VariableMask,
4121 Align Alignment,
4122 TTI::TargetCostKind CostKind,
4123 const Instruction *I = nullptr) {
4124
4125 if (CostKind != TTI::TCK_RecipThroughput)
4126 return 1;
4127
4128 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4129 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4130 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4131 if (!PtrTy && Ptr->getType()->isVectorTy())
4132 PtrTy = dyn_cast<PointerType>(
4133 cast<VectorType>(Ptr->getType())->getElementType());
4134 assert(PtrTy && "Unexpected type for Ptr argument");
4135 unsigned AddressSpace = PtrTy->getAddressSpace();
4136
4137 bool Scalarize = false;
4138 if ((Opcode == Instruction::Load &&
4139 !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4140 (Opcode == Instruction::Store &&
4141 !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4142 Scalarize = true;
4143 // Gather / Scatter for vector 2 is not profitable on KNL / SKX
4144 // Vector-4 of gather/scatter instruction does not exist on KNL.
4145 // We can extend it to 8 elements, but zeroing upper bits of
4146 // the mask vector will add more instructions. Right now we give the scalar
4147 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction
4148 // is better in the VariableMask case.
4149 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
4150 Scalarize = true;
4151
4152 if (Scalarize)
4153 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4154 AddressSpace);
4155
4156 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4157}
4158
4159bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4160 TargetTransformInfo::LSRCost &C2) {
4161 // X86-specific here: instruction count gets first priority.
4162 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4163 C1.NumIVMuls, C1.NumBaseAdds,
4164 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4165 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4166 C2.NumIVMuls, C2.NumBaseAdds,
4167 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4168}
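
The std::tie comparison above is lexicographic, so Insns dominates every later field; a trimmed-down sketch (with a hypothetical two-field struct) shows the effect.

// Illustrative sketch only: instruction count wins, registers only break ties.
#include <tuple>

struct LSRCostSketch { unsigned Insns, NumRegs; };

static bool lessByInsnsFirst(const LSRCostSketch &A, const LSRCostSketch &B) {
  return std::tie(A.Insns, A.NumRegs) < std::tie(B.Insns, B.NumRegs);
}
// {3, 9} compares less than {4, 2}: fewer instructions beats fewer registers.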
4169
4170bool X86TTIImpl::canMacroFuseCmp() {
4171 return ST->hasMacroFusion() || ST->hasBranchFusion();
4172}
4173
4174bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4175 if (!ST->hasAVX())
4176 return false;
4177
4178 // The backend can't handle a single element vector.
4179 if (isa<VectorType>(DataTy) &&
4180 cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4181 return false;
4182 Type *ScalarTy = DataTy->getScalarType();
4183
4184 if (ScalarTy->isPointerTy())
4185 return true;
4186
4187 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4188 return true;
4189
4190 if (!ScalarTy->isIntegerTy())
4191 return false;
4192
4193 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4194 return IntWidth == 32 || IntWidth == 64 ||
4195 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4196}
4197
4198bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4199 return isLegalMaskedLoad(DataType, Alignment);
4200}
4201
4202bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4203 unsigned DataSize = DL.getTypeStoreSize(DataType);
4204 // The only supported nontemporal loads are for aligned vectors of 16 or 32
4205 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
4206 // (the equivalent stores only require AVX).
4207 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4208 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
4209
4210 return false;
4211}
4212
4213bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4214 unsigned DataSize = DL.getTypeStoreSize(DataType);
4215
4216 // SSE4A supports nontemporal stores of float and double at arbitrary
4217 // alignment.
4218 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4219 return true;
4220
4221 // Besides the SSE4A subtarget exception above, only aligned stores are
4222 // available nontemporally on any other subtarget. And only stores with a size
4223 // of 4..32 bytes (powers of 2, only) are permitted.
4224 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4225 !isPowerOf2_32(DataSize))
4226 return false;
4227
4228 // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4229 // loads require AVX2).
4230 if (DataSize == 32)
4231 return ST->hasAVX();
4232 else if (DataSize == 16)
4233 return ST->hasSSE1();
4234 return true;
4235}
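
The non-SSE4A size/alignment checks above can be restated as a standalone predicate; the helper below is a hypothetical sketch that leaves out the subtarget feature tests.

// Illustrative sketch only: power-of-two sizes of 4..32 bytes, stored with
// at least natural alignment, are the only nontemporal-store candidates here.
static bool ntStoreSizeAlignOK(unsigned DataSize, unsigned Alignment) {
  bool IsPowerOf2 = DataSize != 0 && (DataSize & (DataSize - 1)) == 0;
  return Alignment >= DataSize && DataSize >= 4 && DataSize <= 32 && IsPowerOf2;
}
// ntStoreSizeAlignOK(32, 32) passes (and then needs AVX); ntStoreSizeAlignOK(8, 4) fails.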
4236
4237bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4238 if (!isa<VectorType>(DataTy))
4239 return false;
4240
4241 if (!ST->hasAVX512())
4242 return false;
4243
4244 // The backend can't handle a single element vector.
4245 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4246 return false;
4247
4248 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4249
4250 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4251 return true;
4252
4253 if (!ScalarTy->isIntegerTy())
4254 return false;
4255
4256 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4257 return IntWidth == 32 || IntWidth == 64 ||
4258 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4259}
4260
4261bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4262 return isLegalMaskedExpandLoad(DataTy);
4263}
4264
4265bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4266 // Some CPUs have better gather performance than others.
4267 // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4268 // enable gather with a -march.
4269 if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4270 return false;
4271
4272 // This function is currently called in two cases: from the Loop Vectorizer
4273 // and from the Scalarizer.
4274 // When the Loop Vectorizer asks about legality of the feature,
4275 // the vectorization factor is not calculated yet. The Loop Vectorizer
4276 // sends a scalar type and the decision is based on the width of the
4277 // scalar element.
4278 // Later on, the cost model will estimate usage of this intrinsic based on
4279 // the vector type.
4280 // The Scalarizer asks again about legality. It sends a vector type.
4281 // In this case we can reject non-power-of-2 vectors.
4282 // We also reject single element vectors as the type legalizer can't
4283 // scalarize it.
4284 if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4285 unsigned NumElts = DataVTy->getNumElements();
4286 if (NumElts == 1)
4287 return false;
4288 }
4289 Type *ScalarTy = DataTy->getScalarType();
4290 if (ScalarTy->isPointerTy())
4291 return true;
4292
4293 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4294 return true;
4295
4296 if (!ScalarTy->isIntegerTy())
4297 return false;
4298
4299 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4300 return IntWidth == 32 || IntWidth == 64;
4301}
4302
4303bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4304 // AVX2 doesn't support scatter
4305 if (!ST->hasAVX512())
4306 return false;
4307 return isLegalMaskedGather(DataType, Alignment);
4308}
4309
4310bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4311 EVT VT = TLI->getValueType(DL, DataType);
4312 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4313}
4314
4315bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4316 return false;
4317}
4318
4319bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4320 const Function *Callee) const {
4321 const TargetMachine &TM = getTLI()->getTargetMachine();
4322
4323 // Work this as a subsetting of subtarget features.
4324 const FeatureBitset &CallerBits =
4325 TM.getSubtargetImpl(*Caller)->getFeatureBits();
4326 const FeatureBitset &CalleeBits =
4327 TM.getSubtargetImpl(*Callee)->getFeatureBits();
4328
4329 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4330 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4331 return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4332}
4333
4334bool X86TTIImpl::areFunctionArgsABICompatible(
4335 const Function *Caller, const Function *Callee,
4336 SmallPtrSetImpl<Argument *> &Args) const {
4337 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4338 return false;
4339
4340 // If we get here, we know the target features match. If one function
4341 // considers 512-bit vectors legal and the other does not, consider them
4342 // incompatible.
4343 const TargetMachine &TM = getTLI()->getTargetMachine();
4344
4345 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4346 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4347 return true;
4348
4349 // Consider the arguments compatible if they aren't vectors or aggregates.
4350 // FIXME: Look at the size of vectors.
4351 // FIXME: Look at the element types of aggregates to see if there are vectors.
4352 // FIXME: The API of this function seems intended to allow arguments
4353 // to be removed from the set, but the caller doesn't check if the set
4354 // becomes empty so that may not work in practice.
4355 return llvm::none_of(Args, [](Argument *A) {
4356 auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4357 return EltTy->isVectorTy() || EltTy->isAggregateType();
4358 });
4359}
4360
4361X86TTIImpl::TTI::MemCmpExpansionOptions
4362X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4363 TTI::MemCmpExpansionOptions Options;
4364 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4365 Options.NumLoadsPerBlock = 2;
4366 // All GPR and vector loads can be unaligned.
4367 Options.AllowOverlappingLoads = true;
4368 if (IsZeroCmp) {
4369 // Only enable vector loads for equality comparison. Right now the vector
4370 // version is not as fast for three way compare (see #33329).
4371 const unsigned PreferredWidth = ST->getPreferVectorWidth();
4372 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4373 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4374 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4375 }
4376 if (ST->is64Bit()) {
4377 Options.LoadSizes.push_back(8);
4378 }
4379 Options.LoadSizes.push_back(4);
4380 Options.LoadSizes.push_back(2);
4381 Options.LoadSizes.push_back(1);
4382 return Options;
4383}
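
As a rough picture of how the LoadSizes list above gets used, here is a greedy, non-overlapping sketch; the real expander can also emit overlapping loads (AllowOverlappingLoads), so this is only an approximation.

// Illustrative sketch only: consume the byte count with the largest
// available load sizes first.
#include <cstddef>
#include <vector>

static std::vector<unsigned> pickLoads(std::size_t N,
                                       const std::vector<unsigned> &LoadSizes) {
  std::vector<unsigned> Loads;
  for (unsigned Sz : LoadSizes)          // sizes are pushed largest-to-smallest
    while (N >= Sz) {
      Loads.push_back(Sz);
      N -= Sz;
    }
  return Loads;
}
// With {32, 16, 8, 4, 2, 1} and N = 21 this picks loads of 16, 4 and 1 bytes.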
4384
4385bool X86TTIImpl::enableInterleavedAccessVectorization() {
4386 // TODO: We expect this to be beneficial regardless of arch,
4387 // but there are currently some unexplained performance artifacts on Atom.
4388 // As a temporary solution, disable on Atom.
4389 return !(ST->isAtom());
4390}
4391
4392// Get estimation for interleaved load/store operations for AVX2.
4393// \p Factor is the interleaved-access factor (stride) - number of
4394// (interleaved) elements in the group.
4395// \p Indices contains the indices for a strided load: when the
4396// interleaved load has gaps they indicate which elements are used.
4397// If Indices is empty (or if the number of indices is equal to the size
4398// of the interleaved-access as given in \p Factor) the access has no gaps.
4399//
4400// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
4401// computing the cost using a generic formula as a function of generic
4402// shuffles. We therefore use a lookup table instead, filled according to
4403// the instruction sequences that codegen currently generates.
4404int X86TTIImpl::getInterleavedMemoryOpCostAVX2(
4405 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4406 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4407 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4408
4409 if (UseMaskForCond || UseMaskForGaps)
4410 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4411 Alignment, AddressSpace, CostKind,
4412 UseMaskForCond, UseMaskForGaps);
4413
4414 // We currently support only fully-interleaved groups, with no gaps.
4415 // TODO: Support also strided loads (interleaved-groups with gaps).
4416 if (Indices.size() && Indices.size() != Factor)
4417 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4418 Alignment, AddressSpace,
4419 CostKind);
4420
4421 // VecTy for interleave memop is <VF*Factor x Elt>.
4422 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4423 // VecTy = <12 x i32>.
4424 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4425
4426 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
4427 // the VF=2, while v2i128 is an unsupported MVT vector type
4428 // (see MachineValueType.h::getVectorVT()).
4429 if (!LegalVT.isVector())
4430 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4431 Alignment, AddressSpace,
4432 CostKind);
4433
4434 unsigned VF = VecTy->getNumElements() / Factor;
4435 Type *ScalarTy = VecTy->getElementType();
4436
4437 // Calculate the number of memory operations (NumOfMemOps) required
4438 // to load/store the VecTy.
4439 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4440 unsigned LegalVTSize = LegalVT.getStoreSize();
4441 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4442
4443 // Get the cost of one memory operation.
4444 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4445 LegalVT.getVectorNumElements());
4446 unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
4447 MaybeAlign(Alignment), AddressSpace,
4448 CostKind);
4449
4450 auto *VT = FixedVectorType::get(ScalarTy, VF);
4451 EVT ETy = TLI->getValueType(DL, VT);
4452 if (!ETy.isSimple())
4453 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4454 Alignment, AddressSpace,
4455 CostKind);
4456
4457 // TODO: Complete for other data-types and strides.
4458 // Each combination of Stride, ElementTy and VF results in a different
4459 // sequence; The cost tables are therefore accessed with:
4460 // Factor (stride) and VectorType=VFxElemType.
4461 // The Cost accounts only for the shuffle sequence;
4462 // The cost of the loads/stores is accounted for separately.
4463 //
4464 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
4465 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
4466 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64
4467
4468 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8
4469 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8
4470 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8
4471 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8
4472 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8
4473 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32
4474
4475 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8
4476 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8
4477 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8
4478 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8
4479 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
4480
4481 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32
4482 };
4483
4484 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
4485 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
4486 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store)
4487
4488 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store)
4489 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store)
4490 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store)
4491 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
4492 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
4493
4494 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store)
4495 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store)
4496 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store)
4497 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
4498 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store)
4499 };
4500
4501 if (Opcode == Instruction::Load) {
4502 if (const auto *Entry =
4503 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
4504 return NumOfMemOps * MemOpCost + Entry->Cost;
4505 } else {
4506 assert(Opcode == Instruction::Store &&
4507        "Expected Store Instruction at this point");
4508 if (const auto *Entry =
4509 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
4510 return NumOfMemOps * MemOpCost + Entry->Cost;
4511 }
4512
4513 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4514 Alignment, AddressSpace, CostKind);
4515}
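
To make the lookup concrete, here is how a table hit feeds the final AVX2 cost; NumOfMemOps and MemOpCost are assumed values, and only the table entry is taken from AVX2InterleavedLoadTbl above.

// Illustrative sketch only: total = NumOfMemOps * MemOpCost + table cost.
static int avx2InterleavedLoadExample() {
  unsigned NumOfMemOps = 1; // assume the whole <24 x i8> group fits one legal load
  int MemOpCost        = 1; // assumed per-memop cost
  int TableCost        = 9; // { 3, MVT::v8i8, 9 } row above (Factor = 3, VF = 8)
  return NumOfMemOps * MemOpCost + TableCost;   // = 10
}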
4516
4517// Get estimation for interleaved load/store operations and strided load.
4518// \p Indices contains indices for strided load.
4519// \p Factor - the factor of interleaving.
4520 // AVX-512 provides 3-src shuffles that significantly reduce the cost.
4521int X86TTIImpl::getInterleavedMemoryOpCostAVX512(
4522 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4523 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4524 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4525
4526 if (UseMaskForCond || UseMaskForGaps)
4527 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4528 Alignment, AddressSpace, CostKind,
4529 UseMaskForCond, UseMaskForGaps);
4530
4531 // VecTy for interleave memop is <VF*Factor x Elt>.
4532 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4533 // VecTy = <12 x i32>.
4534
4535 // Calculate the number of memory operations (NumOfMemOps) required
4536 // to load/store the VecTy.
4537 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4538 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4539 unsigned LegalVTSize = LegalVT.getStoreSize();
4540 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4541
4542 // Get the cost of one memory operation.
4543 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4544 LegalVT.getVectorNumElements());
4545 unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
4546 MaybeAlign(Alignment), AddressSpace,
4547 CostKind);
4548
4549 unsigned VF = VecTy->getNumElements() / Factor;
4550 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
4551
4552 if (Opcode == Instruction::Load) {
4553 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
4554 // contain the cost of the optimized shuffle sequence that the
4555 // X86InterleavedAccess pass will generate.
4556 // The cost of loads and stores is computed separately from the table.
4557
4558 // X86InterleavedAccess supports only the following interleaved-access groups.
4559 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
4560 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
4561 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
4562 {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
4563 };
4564
4565 if (const auto *Entry =
4566 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
4567 return NumOfMemOps * MemOpCost + Entry->Cost;
4568 // If an entry does not exist, fall back to the default implementation.
4569
4570 // The kind of shuffle depends on the number of loaded values.
4571 // If we load the entire data in one register, we can use a 1-src shuffle.
4572 // Otherwise, we'll merge 2 sources in each operation.
4573 TTI::ShuffleKind ShuffleKind =
4574 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
4575
4576 unsigned ShuffleCost =
4577 getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
4578
4579 unsigned NumOfLoadsInInterleaveGrp =
4580 Indices.size() ? Indices.size() : Factor;
4581 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
4582 VecTy->getNumElements() / Factor);
4583 unsigned NumOfResults =
4584 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
4585 NumOfLoadsInInterleaveGrp;
4586
4587 // About half of the loads may be folded in shuffles when we have only
4588 // one result. If we have more than one result, we do not fold loads at all.
4589 unsigned NumOfUnfoldedLoads =
4590 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
4591
4592 // Get the number of shuffle operations per result.
4593 unsigned NumOfShufflesPerResult =
4594 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
4595
4596 // The SK_PermuteTwoSrc shuffle clobbers one of the src operands.
4597 // When we have more than one destination, we need additional instructions
4598 // to keep sources.
4599 unsigned NumOfMoves = 0;
4600 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
4601 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
4602
4603 int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
4604 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
4605
4606 return Cost;
4607 }
4608
4609 // Store.
4610 assert(Opcode == Instruction::Store &&
4611        "Expected Store Instruction at this point");
4612 // X86InterleavedAccess supports only the following interleaved-access groups.
4613 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
4614 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
4615 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
4616 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
4617
4618 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
4619 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
4620 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
4621 {4, MVT::v64i8, 24} // interleave 4 x 32i8 into 256i8 (and store)
4622 };
4623
4624 if (const auto *Entry =
4625 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
4626 return NumOfMemOps * MemOpCost + Entry->Cost;
4627 // If an entry does not exist, fall back to the default implementation.
4628
4629 // There are no strided stores for now, and a store can't be folded into
4630 // a shuffle.
4631 unsigned NumOfSources = Factor; // The number of values to be merged.
4632 unsigned ShuffleCost =
4633 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
4634 unsigned NumOfShufflesPerStore = NumOfSources - 1;
4635
4636 // The SK_PermuteTwoSrc shuffle clobbers one of the src operands.
4637 // We need additional instructions to keep sources.
4638 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
4639 int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
4640 NumOfMoves;
4641 return Cost;
4642}
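
When no table entry matches, the store path above falls back to the closed-form expression at the end; a small sketch with hypothetical inputs spells it out.

// Illustrative sketch only: NumOfMemOps * (MemOpCost + (Factor - 1) * ShuffleCost)
// plus the extra moves needed to preserve shuffle sources.
static int avx512InterleavedStoreFallback(unsigned NumOfMemOps, int MemOpCost,
                                          int ShuffleCost, unsigned Factor) {
  unsigned NumOfShufflesPerStore = Factor - 1;
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  return NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
         NumOfMoves;
}
// e.g. NumOfMemOps = 2, MemOpCost = 1, ShuffleCost = 1, Factor = 3: 2*(1+2)+2 = 8.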
4643
4644int X86TTIImpl::getInterleavedMemoryOpCost(
4645 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
4646 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
4647 bool UseMaskForCond, bool UseMaskForGaps) {
4648 auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
4649 Type *EltTy = cast<VectorType>(VecTy)->getElementType();
4650 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
4651 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
4652 return true;
4653 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
4654 return HasBW;
4655 return false;
4656 };
4657 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
1. Calling 'X86Subtarget::hasAVX512'
4. Returning from 'X86Subtarget::hasAVX512'
4658 return getInterleavedMemoryOpCostAVX512(
4659 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4660 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4661 if (ST->hasAVX2())
5. Calling 'X86Subtarget::hasAVX2'
8. Returning from 'X86Subtarget::hasAVX2'
9. Taking false branch
4662 return getInterleavedMemoryOpCostAVX2(
4663 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4664 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4665
4666 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
10. Calling 'BasicTTIImplBase::getInterleavedMemoryOpCost'
4667 Alignment, AddressSpace, CostKind,
4668 UseMaskForCond, UseMaskForGaps);
4669}

/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/X86/X86Subtarget.h

1//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the X86 specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
14#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H
15
16#include "X86FrameLowering.h"
17#include "X86ISelLowering.h"
18#include "X86InstrInfo.h"
19#include "X86SelectionDAGInfo.h"
20#include "llvm/ADT/Triple.h"
21#include "llvm/CodeGen/TargetSubtargetInfo.h"
22#include "llvm/IR/CallingConv.h"
23#include <climits>
24#include <memory>
25
26#define GET_SUBTARGETINFO_HEADER
27#include "X86GenSubtargetInfo.inc"
28
29namespace llvm {
30
31class CallLowering;
32class GlobalValue;
33class InstructionSelector;
34class LegalizerInfo;
35class RegisterBankInfo;
36class StringRef;
37class TargetMachine;
38
39/// The X86 backend supports a number of different styles of PIC.
40///
41namespace PICStyles {
42
43enum class Style {
44 StubPIC, // Used on i386-darwin in pic mode.
45 GOT, // Used on 32 bit elf when in pic mode.
46 RIPRel, // Used on X86-64 when in pic mode.
47 None // Set when not in pic mode.
48};
49
50} // end namespace PICStyles
51
52class X86Subtarget final : public X86GenSubtargetInfo {
53 // NOTE: Do not add anything new to this list. Coarse, CPU name based flags
54 // are not a good idea. We should be migrating away from these.
55 enum X86ProcFamilyEnum {
56 Others,
57 IntelAtom,
58 IntelSLM
59 };
60
61 enum X86SSEEnum {
62 NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
63 };
64
65 enum X863DNowEnum {
66 NoThreeDNow, MMX, ThreeDNow, ThreeDNowA
67 };
68
69 /// X86 processor family: Intel Atom, and others
70 X86ProcFamilyEnum X86ProcFamily = Others;
71
72 /// Which PIC style to use
73 PICStyles::Style PICStyle;
74
75 const TargetMachine &TM;
76
77 /// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
78 X86SSEEnum X86SSELevel = NoSSE;
79
80 /// MMX, 3DNow, 3DNow Athlon, or none supported.
81 X863DNowEnum X863DNowLevel = NoThreeDNow;
82
83 /// True if the processor supports X87 instructions.
84 bool HasX87 = false;
85
86 /// True if the processor supports CMPXCHG8B.
87 bool HasCmpxchg8b = false;
88
89 /// True if this processor has NOPL instruction
90 /// (generally pentium pro+).
91 bool HasNOPL = false;
92
93 /// True if this processor has conditional move instructions
94 /// (generally pentium pro+).
95 bool HasCMov = false;
96
97 /// True if the processor supports X86-64 instructions.
98 bool HasX86_64 = false;
99
100 /// True if the processor supports POPCNT.
101 bool HasPOPCNT = false;
102
103 /// True if the processor supports SSE4A instructions.
104 bool HasSSE4A = false;
105
106 /// Target has AES instructions
107 bool HasAES = false;
108 bool HasVAES = false;
109
110 /// Target has FXSAVE/FXRESTOR instructions
111 bool HasFXSR = false;
112
113 /// Target has XSAVE instructions
114 bool HasXSAVE = false;
115
116 /// Target has XSAVEOPT instructions
117 bool HasXSAVEOPT = false;
118
119 /// Target has XSAVEC instructions
120 bool HasXSAVEC = false;
121
122 /// Target has XSAVES instructions
123 bool HasXSAVES = false;
124
125 /// Target has carry-less multiplication
126 bool HasPCLMUL = false;
127 bool HasVPCLMULQDQ = false;
128
129 /// Target has Galois Field Arithmetic instructions
130 bool HasGFNI = false;
131
132 /// Target has 3-operand fused multiply-add
133 bool HasFMA = false;
134
135 /// Target has 4-operand fused multiply-add
136 bool HasFMA4 = false;
137
138 /// Target has XOP instructions
139 bool HasXOP = false;
140
141 /// Target has TBM instructions.
142 bool HasTBM = false;
143
144 /// Target has LWP instructions
145 bool HasLWP = false;
146
147 /// True if the processor has the MOVBE instruction.
148 bool HasMOVBE = false;
149
150 /// True if the processor has the RDRAND instruction.
151 bool HasRDRAND = false;
152
153 /// Processor has 16-bit floating point conversion instructions.
154 bool HasF16C = false;
155
156 /// Processor has FS/GS base instructions.
157 bool HasFSGSBase = false;
158
159 /// Processor has LZCNT instruction.
160 bool HasLZCNT = false;
161
162 /// Processor has BMI1 instructions.
163 bool HasBMI = false;
164
165 /// Processor has BMI2 instructions.
166 bool HasBMI2 = false;
167
168 /// Processor has VBMI instructions.
169 bool HasVBMI = false;
170
171 /// Processor has VBMI2 instructions.
172 bool HasVBMI2 = false;
173
174 /// Processor has Integer Fused Multiply Add
175 bool HasIFMA = false;
176
177 /// Processor has RTM instructions.
178 bool HasRTM = false;
179
180 /// Processor has ADX instructions.
181 bool HasADX = false;
182
183 /// Processor has SHA instructions.
184 bool HasSHA = false;
185
186 /// Processor has PRFCHW instructions.
187 bool HasPRFCHW = false;
188
189 /// Processor has RDSEED instructions.
190 bool HasRDSEED = false;
191
192 /// Processor has LAHF/SAHF instructions in 64-bit mode.
193 bool HasLAHFSAHF64 = false;
194
195 /// Processor has MONITORX/MWAITX instructions.
196 bool HasMWAITX = false;
197
198 /// Processor has Cache Line Zero instruction
199 bool HasCLZERO = false;
200
201 /// Processor has Cache Line Demote instruction
202 bool HasCLDEMOTE = false;
203
204 /// Processor has MOVDIRI instruction (direct store integer).
205 bool HasMOVDIRI = false;
206
207 /// Processor has MOVDIR64B instruction (direct store 64 bytes).
208 bool HasMOVDIR64B = false;
209
210 /// Processor has ptwrite instruction.
211 bool HasPTWRITE = false;
212
213 /// Processor has Prefetch with intent to Write instruction
214 bool HasPREFETCHWT1 = false;
215
216 /// True if SHLD instructions are slow.
217 bool IsSHLDSlow = false;
218
219 /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
220 // PMULUDQ.
221 bool IsPMULLDSlow = false;
222
223 /// True if the PMADDWD instruction is slow compared to PMULLD.
224 bool IsPMADDWDSlow = false;
225
226 /// True if unaligned memory accesses of 16-bytes are slow.
227 bool IsUAMem16Slow = false;
228
229 /// True if unaligned memory accesses of 32-bytes are slow.
230 bool IsUAMem32Slow = false;
231
232 /// True if SSE operations can have unaligned memory operands.
233 /// This may require setting a configuration bit in the processor.
234 bool HasSSEUnalignedMem = false;
235
236 /// True if this processor has the CMPXCHG16B instruction;
237 /// this is true for most x86-64 chips, but not the first AMD chips.
238 bool HasCmpxchg16b = false;
239
240 /// True if the LEA instruction should be used for adjusting
241 /// the stack pointer. This is an optimization for Intel Atom processors.
242 bool UseLeaForSP = false;
243
244 /// True if POPCNT instruction has a false dependency on the destination register.
245 bool HasPOPCNTFalseDeps = false;
246
247 /// True if LZCNT/TZCNT instructions have a false dependency on the destination register.
248 bool HasLZCNTFalseDeps = false;
249
250 /// True if its preferable to combine to a single shuffle using a variable
251 /// mask over multiple fixed shuffles.
252 bool HasFastVariableShuffle = false;
253
254 /// True if vzeroupper instructions should be inserted after code that uses
255 /// ymm or zmm registers.
256 bool InsertVZEROUPPER = false;
257
258 /// True if there is no performance penalty for writing NOPs with up to
259 /// 7 bytes.
260 bool HasFast7ByteNOP = false;
261
262 /// True if there is no performance penalty for writing NOPs with up to
263 /// 11 bytes.
264 bool HasFast11ByteNOP = false;
265
266 /// True if there is no performance penalty for writing NOPs with up to
267 /// 15 bytes.
268 bool HasFast15ByteNOP = false;
269
270 /// True if gather is reasonably fast. This is true for Skylake client and
271 /// all AVX-512 CPUs.
272 bool HasFastGather = false;
273
274 /// True if hardware SQRTSS instruction is at least as fast (latency) as
275 /// RSQRTSS followed by a Newton-Raphson iteration.
276 bool HasFastScalarFSQRT = false;
277
278 /// True if hardware SQRTPS/VSQRTPS instructions are at least as fast
279 /// (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration.
280 bool HasFastVectorFSQRT = false;
281
282 /// True if 8-bit divisions are significantly faster than
283 /// 32-bit divisions and should be used when possible.
284 bool HasSlowDivide32 = false;
285
286 /// True if 32-bit divides are significantly faster than
287 /// 64-bit divisions and should be used when possible.
288 bool HasSlowDivide64 = false;
289
290 /// True if LZCNT instruction is fast.
291 bool HasFastLZCNT = false;
292
293 /// True if SHLD based rotate is fast.
294 bool HasFastSHLDRotate = false;
295
296 /// True if the processor supports macrofusion.
297 bool HasMacroFusion = false;
298
299 /// True if the processor supports branch fusion.
300 bool HasBranchFusion = false;
301
302 /// True if the processor has enhanced REP MOVSB/STOSB.
303 bool HasERMSB = false;
304
305 /// True if the processor has fast short REP MOV.
306 bool HasFSRM = false;
307
308 /// True if the short functions should be padded to prevent
309 /// a stall when returning too early.
310 bool PadShortFunctions = false;
311
312 /// True if two memory operand instructions should use a temporary register
313 /// instead.
314 bool SlowTwoMemOps = false;
315
316 /// True if the LEA instruction inputs have to be ready at address generation
317 /// (AG) time.
318 bool LEAUsesAG = false;
319
320 /// True if the LEA instruction with certain arguments is slow
321 bool SlowLEA = false;
322
323 /// True if the LEA instruction has all three source operands: base, index,
324 /// and offset or if the LEA instruction uses base and index registers where
325 /// the base is EBP, RBP, or R13
326 bool Slow3OpsLEA = false;
327
328 /// True if INC and DEC instructions are slow when writing to flags
329 bool SlowIncDec = false;
330
331 /// Processor has AVX-512 PreFetch Instructions
332 bool HasPFI = false;
333
334 /// Processor has AVX-512 Exponential and Reciprocal Instructions
335 bool HasERI = false;
336
337 /// Processor has AVX-512 Conflict Detection Instructions
338 bool HasCDI = false;
339
340 /// Processor has AVX-512 population count Instructions
341 bool HasVPOPCNTDQ = false;
342
343 /// Processor has AVX-512 Doubleword and Quadword instructions
344 bool HasDQI = false;
345
346 /// Processor has AVX-512 Byte and Word instructions
347 bool HasBWI = false;
348
349 /// Processor has AVX-512 Vector Length eXtensions
350 bool HasVLX = false;
351
352 /// Processor has PKU extensions
353 bool HasPKU = false;
354
355 /// Processor has AVX-512 Vector Neural Network Instructions
356 bool HasVNNI = false;
357
358 /// Processor has AVX-512 bfloat16 floating-point extensions
359 bool HasBF16 = false;
360
361 /// Processor supports ENQCMD instructions
362 bool HasENQCMD = false;
363
364 /// Processor has AVX-512 Bit Algorithms instructions
365 bool HasBITALG = false;
366
367 /// Processor has AVX-512 vp2intersect instructions
368 bool HasVP2INTERSECT = false;
369
370 /// Processor supports CET SHSTK - Control-Flow Enforcement Technology
371 /// using Shadow Stack
372 bool HasSHSTK = false;
373
374 /// Processor supports Invalidate Process-Context Identifier
375 bool HasINVPCID = false;
376
377 /// Processor has Software Guard Extensions
378 bool HasSGX = false;
379
380 /// Processor supports Flush Cache Line instruction
381 bool HasCLFLUSHOPT = false;
382
383 /// Processor supports Cache Line Write Back instruction
384 bool HasCLWB = false;
385
386 /// Processor supports Write Back No Invalidate instruction
387 bool HasWBNOINVD = false;
388
389 /// Processor supports RDPID instruction
390 bool HasRDPID = false;
391
392 /// Processor supports WaitPKG instructions
393 bool HasWAITPKG = false;
394
395 /// Processor supports PCONFIG instruction
396 bool HasPCONFIG = false;
397
398 /// Processor supports SERIALIZE instruction
399 bool HasSERIALIZE = false;
400
401 /// Processor supports TSXLDTRK instruction
402 bool HasTSXLDTRK = false;
403
404 /// Processor has AMX support
405 bool HasAMXTILE = false;
406 bool HasAMXBF16 = false;
407 bool HasAMXINT8 = false;
408
409 /// Processor has a single uop BEXTR implementation.
410 bool HasFastBEXTR = false;
411
412 /// Try harder to combine to horizontal vector ops if they are fast.
413 bool HasFastHorizontalOps = false;
414
415 /// Prefer a left/right scalar logical shifts pair over a shift+and pair.
416 bool HasFastScalarShiftMasks = false;
417
418 /// Prefer a left/right vector logical shifts pair over a shift+and pair.
419 bool HasFastVectorShiftMasks = false;
420
421 /// Use a retpoline thunk rather than indirect calls to block speculative
422 /// execution.
423 bool UseRetpolineIndirectCalls = false;
424
425 /// Use a retpoline thunk or remove any indirect branch to block speculative
426 /// execution.
427 bool UseRetpolineIndirectBranches = false;
428
429 /// Deprecated flag, query `UseRetpolineIndirectCalls` and
430 /// `UseRetpolineIndirectBranches` instead.
431 bool DeprecatedUseRetpoline = false;
432
433 /// When using a retpoline thunk, call an externally provided thunk rather
434 /// than emitting one inside the compiler.
435 bool UseRetpolineExternalThunk = false;
436
437 /// Prevent generation of indirect call/branch instructions from memory,
438 /// and force all indirect call/branch instructions from a register to be
439 /// preceded by an LFENCE. Also decompose RET instructions into a
440 /// POP+LFENCE+JMP sequence.
441 bool UseLVIControlFlowIntegrity = false;
442
443 /// Enable Speculative Execution Side Effect Suppression
444 bool UseSpeculativeExecutionSideEffectSuppression = false;
445
446 /// Insert LFENCE instructions to prevent data speculatively injected into
447 /// loads from being used maliciously.
448 bool UseLVILoadHardening = false;
449
450 /// Use software floating point for code generation.
451 bool UseSoftFloat = false;
452
453 /// Use alias analysis during code generation.
454 bool UseAA = false;
455
456 /// The minimum alignment known to hold of the stack frame on
457 /// entry to the function and which must be maintained by every function.
458 Align stackAlignment = Align(4);
459
460 /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
461 ///
462 // FIXME: this is a known good value for Yonah. How about others?
463 unsigned MaxInlineSizeThreshold = 128;
464
465 /// Indicates target prefers 128 bit instructions.
466 bool Prefer128Bit = false;
467
468 /// Indicates target prefers 256 bit instructions.
469 bool Prefer256Bit = false;
470
471 /// Indicates target prefers AVX512 mask registers.
472 bool PreferMaskRegisters = false;
473
474 /// Use Goldmont specific floating point div/sqrt costs.
475 bool UseGLMDivSqrtCosts = false;
476
477 /// What processor and OS we're targeting.
478 Triple TargetTriple;
479
480 /// GlobalISel related APIs.
481 std::unique_ptr<CallLowering> CallLoweringInfo;
482 std::unique_ptr<LegalizerInfo> Legalizer;
483 std::unique_ptr<RegisterBankInfo> RegBankInfo;
484 std::unique_ptr<InstructionSelector> InstSelector;
485
486private:
487 /// Override the stack alignment.
488 MaybeAlign StackAlignOverride;
489
490 /// Preferred vector width from function attribute.
491 unsigned PreferVectorWidthOverride;
492
493 /// Resolved preferred vector width from function attribute and subtarget
494 /// features.
495 unsigned PreferVectorWidth = UINT32_MAX;
496
497 /// Required vector width from function attribute.
498 unsigned RequiredVectorWidth;
499
500 /// True if compiling for 64-bit, false for 16-bit or 32-bit.
501 bool In64BitMode = false;
502
503 /// True if compiling for 32-bit, false for 16-bit or 64-bit.
504 bool In32BitMode = false;
505
506 /// True if compiling for 16-bit, false for 32-bit or 64-bit.
507 bool In16BitMode = false;
508
509 X86SelectionDAGInfo TSInfo;
510 // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
511 // X86TargetLowering needs.
512 X86InstrInfo InstrInfo;
513 X86TargetLowering TLInfo;
514 X86FrameLowering FrameLowering;
515
516public:
517 /// This constructor initializes the data members to match that
518 /// of the specified triple.
519 ///
520 X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS,
521 const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
522 unsigned PreferVectorWidthOverride,
523 unsigned RequiredVectorWidth);
524
525 const X86TargetLowering *getTargetLowering() const override {
526 return &TLInfo;
527 }
528
529 const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }
530
531 const X86FrameLowering *getFrameLowering() const override {
532 return &FrameLowering;
533 }
534
535 const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
536 return &TSInfo;
537 }
538
539 const X86RegisterInfo *getRegisterInfo() const override {
540 return &getInstrInfo()->getRegisterInfo();
541 }
542
543 /// Returns the minimum alignment known to hold of the
544 /// stack frame on entry to the function and which must be maintained by every
545 /// function for this subtarget.
546 Align getStackAlignment() const { return stackAlignment; }
547
548 /// Returns the maximum memset / memcpy size
549 /// that still makes it profitable to inline the call.
550 unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }
551
552 /// ParseSubtargetFeatures - Parses features string setting specified
553 /// subtarget options. Definition of function is auto generated by tblgen.
554 void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
555
556 /// Methods used by Global ISel
557 const CallLowering *getCallLowering() const override;
558 InstructionSelector *getInstructionSelector() const override;
559 const LegalizerInfo *getLegalizerInfo() const override;
560 const RegisterBankInfo *getRegBankInfo() const override;
561
562private:
563 /// Initialize the full set of dependencies so we can use an initializer
564 /// list for X86Subtarget.
565 X86Subtarget &initializeSubtargetDependencies(StringRef CPU,
566 StringRef TuneCPU,
567 StringRef FS);
568 void initSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
569
570public:
571 /// Is this x86_64? (disregarding specific ABI / programming model)
572 bool is64Bit() const {
573 return In64BitMode;
574 }
575
576 bool is32Bit() const {
577 return In32BitMode;
578 }
579
580 bool is16Bit() const {
581 return In16BitMode;
582 }
583
584 /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
585 bool isTarget64BitILP32() const {
586 return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
587 TargetTriple.isOSNaCl());
588 }
589
590 /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
591 bool isTarget64BitLP64() const {
592 return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32 &&
593 !TargetTriple.isOSNaCl());
594 }
595
596 PICStyles::Style getPICStyle() const { return PICStyle; }
597 void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
598
599 bool hasX87() const { return HasX87; }
600 bool hasCmpxchg8b() const { return HasCmpxchg8b; }
601 bool hasNOPL() const { return HasNOPL; }
602 // SSE codegen depends on cmovs, and all SSE1+ processors support them.
603 // All 64-bit processors support cmov.
604 bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
605 bool hasSSE1() const { return X86SSELevel >= SSE1; }
606 bool hasSSE2() const { return X86SSELevel >= SSE2; }
607 bool hasSSE3() const { return X86SSELevel >= SSE3; }
608 bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
609 bool hasSSE41() const { return X86SSELevel >= SSE41; }
610 bool hasSSE42() const { return X86SSELevel >= SSE42; }
611 bool hasAVX() const { return X86SSELevel >= AVX; }
612 bool hasAVX2() const { return X86SSELevel >= AVX2; }
6. Assuming field 'X86SSELevel' is < AVX2
7. Returning zero, which participates in a condition later
613 bool hasAVX512() const { return X86SSELevel >= AVX512F; }
2. Assuming field 'X86SSELevel' is < AVX512F
3. Returning zero, which participates in a condition later
614 bool hasInt256() const { return hasAVX2(); }
615 bool hasSSE4A() const { return HasSSE4A; }
616 bool hasMMX() const { return X863DNowLevel >= MMX; }
617 bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
618 bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
619 bool hasPOPCNT() const { return HasPOPCNT; }
620 bool hasAES() const { return HasAES; }
621 bool hasVAES() const { return HasVAES; }
622 bool hasFXSR() const { return HasFXSR; }
623 bool hasXSAVE() const { return HasXSAVE; }
624 bool hasXSAVEOPT() const { return HasXSAVEOPT; }
625 bool hasXSAVEC() const { return HasXSAVEC; }
626 bool hasXSAVES() const { return HasXSAVES; }
627 bool hasPCLMUL() const { return HasPCLMUL; }
628 bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
629 bool hasGFNI() const { return HasGFNI; }
630 // Prefer FMA4 to FMA - it's better for commutation/memory folding and
631 // has equal or better performance on all supported targets.
632 bool hasFMA() const { return HasFMA; }
633 bool hasFMA4() const { return HasFMA4; }
634 bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
635 bool hasXOP() const { return HasXOP; }
636 bool hasTBM() const { return HasTBM; }
637 bool hasLWP() const { return HasLWP; }
638 bool hasMOVBE() const { return HasMOVBE; }
639 bool hasRDRAND() const { return HasRDRAND; }
640 bool hasF16C() const { return HasF16C; }
641 bool hasFSGSBase() const { return HasFSGSBase; }
642 bool hasLZCNT() const { return HasLZCNT; }
643 bool hasBMI() const { return HasBMI; }
644 bool hasBMI2() const { return HasBMI2; }
645 bool hasVBMI() const { return HasVBMI; }
646 bool hasVBMI2() const { return HasVBMI2; }
647 bool hasIFMA() const { return HasIFMA; }
648 bool hasRTM() const { return HasRTM; }
649 bool hasADX() const { return HasADX; }
650 bool hasSHA() const { return HasSHA; }
651 bool hasPRFCHW() const { return HasPRFCHW; }
652 bool hasPREFETCHWT1() const { return HasPREFETCHWT1; }
653 bool hasPrefetchW() const {
654 // The PREFETCHW instruction was added with 3DNow, but later CPUs gave it
655 // its own CPUID bit as part of deprecating 3DNow. Intel eventually added
656 // it, and KNL has another variant that prefetches to L2 cache. We assume the
657 // L1 version exists if the L2 version does.
658 return has3DNow() || hasPRFCHW() || hasPREFETCHWT1();
659 }
660 bool hasSSEPrefetch() const {
661 // We implicitly enable these when we have a write prefetch supporting cache
662 // level, OR if we have prfchw but don't already have a read prefetch from
663 // 3dnow.
664 return hasSSE1() || (hasPRFCHW() && !has3DNow()) || hasPREFETCHWT1();
665 }
666 bool hasRDSEED() const { return HasRDSEED; }
667 bool hasLAHFSAHF() const { return HasLAHFSAHF64 || !is64Bit(); }
668 bool hasMWAITX() const { return HasMWAITX; }
669 bool hasCLZERO() const { return HasCLZERO; }
670 bool hasCLDEMOTE() const { return HasCLDEMOTE; }
671 bool hasMOVDIRI() const { return HasMOVDIRI; }
672 bool hasMOVDIR64B() const { return HasMOVDIR64B; }
673 bool hasPTWRITE() const { return HasPTWRITE; }
674 bool isSHLDSlow() const { return IsSHLDSlow; }
675 bool isPMULLDSlow() const { return IsPMULLDSlow; }
676 bool isPMADDWDSlow() const { return IsPMADDWDSlow; }
677 bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
678 bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
679 bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
680 bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
681 bool useLeaForSP() const { return UseLeaForSP; }
682 bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
683 bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
684 bool hasFastVariableShuffle() const {
685 return HasFastVariableShuffle;
686 }
687 bool insertVZEROUPPER() const { return InsertVZEROUPPER; }
688 bool hasFastGather() const { return HasFastGather; }
689 bool hasFastScalarFSQRT() const { return HasFastScalarFSQRT; }
690 bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
691 bool hasFastLZCNT() const { return HasFastLZCNT; }
692 bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
693 bool hasFastBEXTR() const { return HasFastBEXTR; }
694 bool hasFastHorizontalOps() const { return HasFastHorizontalOps; }
695 bool hasFastScalarShiftMasks() const { return HasFastScalarShiftMasks; }
696 bool hasFastVectorShiftMasks() const { return HasFastVectorShiftMasks; }
697 bool hasMacroFusion() const { return HasMacroFusion; }
698 bool hasBranchFusion() const { return HasBranchFusion; }
699 bool hasERMSB() const { return HasERMSB; }
700 bool hasFSRM() const { return HasFSRM; }
701 bool hasSlowDivide32() const { return HasSlowDivide32; }
702 bool hasSlowDivide64() const { return HasSlowDivide64; }
703 bool padShortFunctions() const { return PadShortFunctions; }
704 bool slowTwoMemOps() const { return SlowTwoMemOps; }
705 bool LEAusesAG() const { return LEAUsesAG; }
706 bool slowLEA() const { return SlowLEA; }
707 bool slow3OpsLEA() const { return Slow3OpsLEA; }
708 bool slowIncDec() const { return SlowIncDec; }
709 bool hasCDI() const { return HasCDI; }
710 bool hasVPOPCNTDQ() const { return HasVPOPCNTDQ; }
711 bool hasPFI() const { return HasPFI; }
712 bool hasERI() const { return HasERI; }
713 bool hasDQI() const { return HasDQI; }
714 bool hasBWI() const { return HasBWI; }
715 bool hasVLX() const { return HasVLX; }
716 bool hasPKU() const { return HasPKU; }
717 bool hasVNNI() const { return HasVNNI; }
718 bool hasBF16() const { return HasBF16; }
719 bool hasVP2INTERSECT() const { return HasVP2INTERSECT; }
720 bool hasBITALG() const { return HasBITALG; }
721 bool hasSHSTK() const { return HasSHSTK; }
722 bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
723 bool hasCLWB() const { return HasCLWB; }
724 bool hasWBNOINVD() const { return HasWBNOINVD; }
725 bool hasRDPID() const { return HasRDPID; }
726 bool hasWAITPKG() const { return HasWAITPKG; }
727 bool hasPCONFIG() const { return HasPCONFIG; }
728 bool hasSGX() const { return HasSGX; }
729 bool hasINVPCID() const { return HasINVPCID; }
730 bool hasENQCMD() const { return HasENQCMD; }
731 bool hasSERIALIZE() const { return HasSERIALIZE; }
732 bool hasTSXLDTRK() const { return HasTSXLDTRK; }
733 bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
734 bool useRetpolineIndirectBranches() const {
735 return UseRetpolineIndirectBranches;
736 }
737 bool hasAMXTILE() const { return HasAMXTILE; }
738 bool hasAMXBF16() const { return HasAMXBF16; }
739 bool hasAMXINT8() const { return HasAMXINT8; }
740 bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
741
742 // These are generic getters that OR together all of the thunk types
743 // supported by the subtarget. Therefore useIndirectThunk*() will return true
744 // if any respective thunk feature is enabled.
745 bool useIndirectThunkCalls() const {
746 return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
747 }
748 bool useIndirectThunkBranches() const {
749 return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
750 }
751
752 bool preferMaskRegisters() const { return PreferMaskRegisters; }
753 bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
754 bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
755 bool useLVILoadHardening() const { return UseLVILoadHardening; }
756 bool useSpeculativeExecutionSideEffectSuppression() const {
757 return UseSpeculativeExecutionSideEffectSuppression;
758 }
759
760 unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
761 unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }
762
763 // Helper functions to determine when we should allow widening to 512-bit
764 // during codegen.
765 // TODO: Currently we're always allowing widening on CPUs without VLX,
766 // because for many cases we don't have a better option.
767 bool canExtendTo512DQ() const {
768 return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
769 }
770 bool canExtendTo512BW() const {
771 return hasBWI() && canExtendTo512DQ();
772 }
773
774 // If there are no 512-bit vectors and we prefer not to use 512-bit registers,
775 // disable them in the legalizer.
776 bool useAVX512Regs() const {
777 return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
778 }
779
780 bool useBWIRegs() const {
781 return hasBWI() && useAVX512Regs();
782 }
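
A standalone toy model of the widening decision above; the feature flags and vector widths are assumed example inputs, not data from this build.

#include <cstdio>

struct Model {
  bool HasAVX512, HasVLX, HasBWI;
  unsigned PreferVectorWidth, RequiredVectorWidth;

  bool canExtendTo512DQ() const {
    return HasAVX512 && (!HasVLX || PreferVectorWidth >= 512);
  }
  bool useAVX512Regs() const {
    return HasAVX512 && (canExtendTo512DQ() || RequiredVectorWidth > 256);
  }
};

int main() {
  // An assumed AVX-512-with-VLX configuration and prefer-vector-width=256:
  Model M{true, true, true, 256, 0};
  std::printf("useAVX512Regs: %d\n", M.useAVX512Regs()); // 0: 512-bit regs disabled
  M.RequiredVectorWidth = 512; // an explicit 512-bit requirement overrides it
  std::printf("useAVX512Regs: %d\n", M.useAVX512Regs()); // 1
  return 0;
}
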
783
784 bool isXRaySupported() const override { return is64Bit(); }
785
786 /// TODO: to be removed later and replaced with suitable properties
787 bool isAtom() const { return X86ProcFamily == IntelAtom; }
788 bool isSLM() const { return X86ProcFamily == IntelSLM; }
789 bool useSoftFloat() const { return UseSoftFloat; }
790 bool useAA() const override { return UseAA; }
791
792 /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
793 /// no-sse2). There isn't any reason to disable it if the target processor
794 /// supports it.
795 bool hasMFence() const { return hasSSE2() || is64Bit(); }
796
797 const Triple &getTargetTriple() const { return TargetTriple; }
798
799 bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
800 bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
801 bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
802 bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
803 bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }
804
805 bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
806 bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
807 bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
808
809 bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
810 bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
811 bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
812 bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
813 bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
814 bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
815 bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
816 bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
817 bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }
818
819 bool isTargetWindowsMSVC() const {
820 return TargetTriple.isWindowsMSVCEnvironment();
821 }
822
823 bool isTargetWindowsCoreCLR() const {
824 return TargetTriple.isWindowsCoreCLREnvironment();
825 }
826
827 bool isTargetWindowsCygwin() const {
828 return TargetTriple.isWindowsCygwinEnvironment();
829 }
830
831 bool isTargetWindowsGNU() const {
832 return TargetTriple.isWindowsGNUEnvironment();
833 }
834
835 bool isTargetWindowsItanium() const {
836 return TargetTriple.isWindowsItaniumEnvironment();
837 }
838
839 bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
840
841 bool isOSWindows() const { return TargetTriple.isOSWindows(); }
842
843 bool isTargetWin64() const { return In64BitMode && isOSWindows(); }
844
845 bool isTargetWin32() const { return !In64BitMode && isOSWindows(); }
846
847 bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
848 bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }
849
850 bool isPICStyleStubPIC() const {
851 return PICStyle == PICStyles::Style::StubPIC;
852 }
853
854 bool isPositionIndependent() const;
855
856 bool isCallingConvWin64(CallingConv::ID CC) const {
857 switch (CC) {
858 // On Win64, all these conventions just use the default convention.
859 case CallingConv::C:
860 case CallingConv::Fast:
861 case CallingConv::Tail:
862 case CallingConv::Swift:
863 case CallingConv::X86_FastCall:
864 case CallingConv::X86_StdCall:
865 case CallingConv::X86_ThisCall:
866 case CallingConv::X86_VectorCall:
867 case CallingConv::Intel_OCL_BI:
868 return isTargetWin64();
869 // This convention allows using the Win64 convention on other targets.
870 case CallingConv::Win64:
871 return true;
872 // This convention allows using the SysV convention on Windows targets.
873 case CallingConv::X86_64_SysV:
874 return false;
875 // Otherwise, who knows what this is.
876 default:
877 return false;
878 }
879 }
880
881 /// Classify a global variable reference for the current subtarget according
882 /// to how we should reference it in a non-pcrel context.
883 unsigned char classifyLocalReference(const GlobalValue *GV) const;
884
885 unsigned char classifyGlobalReference(const GlobalValue *GV,
886 const Module &M) const;
887 unsigned char classifyGlobalReference(const GlobalValue *GV) const;
888
889 /// Classify a global function reference for the current subtarget.
890 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
891 const Module &M) const;
892 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
893
894 /// Classify a blockaddress reference for the current subtarget according to
895 /// how we should reference it in a non-pcrel context.
896 unsigned char classifyBlockAddressReference() const;
897
898 /// Return true if the subtarget allows calls to an immediate address.
899 bool isLegalToCallImmediateAddr() const;
900
901 /// If we are using indirect thunks, we need to expand indirectbr to avoid it
902 /// lowering to an actual indirect jump.
903 bool enableIndirectBrExpand() const override {
904 return useIndirectThunkBranches();
905 }
906
907 /// Enable the MachineScheduler pass for all X86 subtargets.
908 bool enableMachineScheduler() const override { return true; }
909
910 bool enableEarlyIfConversion() const override;
911
912 void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
913 &Mutations) const override;
914
915 AntiDepBreakMode getAntiDepBreakMode() const override {
916 return TargetSubtargetInfo::ANTIDEP_CRITICAL;
917 }
918
919 bool enableAdvancedRASplitCost() const override { return true; }
920};
921
922} // end namespace llvm
923
924#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H

/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/include/llvm/CodeGen/BasicTTIImpl.h

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/TargetTransformInfo.h"
26#include "llvm/Analysis/TargetTransformInfoImpl.h"
27#include "llvm/CodeGen/ISDOpcodes.h"
28#include "llvm/CodeGen/TargetLowering.h"
29#include "llvm/CodeGen/TargetSubtargetInfo.h"
30#include "llvm/CodeGen/ValueTypes.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/IR/Type.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/CommandLine.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MachineValueType.h"
47#include "llvm/Support/MathExtras.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <limits>
52#include <utility>
53
54namespace llvm {
55
56class Function;
57class GlobalValue;
58class LLVMContext;
59class ScalarEvolution;
60class SCEV;
61class TargetMachine;
62
63extern cl::opt<unsigned> PartialUnrollingThreshold;
64
65/// Base class which can be used to help build a TTI implementation.
66///
67/// This class provides as much implementation of the TTI interface as is
68/// possible using the target independent parts of the code generator.
69///
70/// In order to subclass it, your class must implement a getST() method to
71/// return the subtarget, and a getTLI() method to return the target lowering.
72/// We need these methods implemented in the derived class so that this class
73/// doesn't have to duplicate storage for them.
74template <typename T>
75class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
76private:
77 using BaseT = TargetTransformInfoImplCRTPBase<T>;
78 using TTI = TargetTransformInfo;
79
80 /// Helper function to access this as a T.
81 T *thisT() { return static_cast<T *>(this); }
82
83 /// Estimate a cost of Broadcast as an extract and sequence of insert
84 /// operations.
85 unsigned getBroadcastShuffleOverhead(FixedVectorType *VTy) {
86 unsigned Cost = 0;
87 // Broadcast cost is equal to the cost of extracting the zeroth element
88 // plus the cost of inserting it into every element of the result vector.
89 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);
90
91 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
92 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
93 }
94 return Cost;
95 }
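
For concreteness, a small formula-only sketch of the broadcast overhead computed above, assuming a unit cost for each extract and insert; the costs are illustrative, not real X86 numbers.

// Broadcast of an N-element vector: one extract of lane 0 plus N inserts.
unsigned broadcastOverhead(unsigned NumElts, unsigned ExtractCost,
                           unsigned InsertCost) {
  return ExtractCost + NumElts * InsertCost; // e.g. 8 lanes at unit cost: 1 + 8 = 9
}
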
96
97 /// Estimate a cost of shuffle as a sequence of extract and insert
98 /// operations.
99 unsigned getPermuteShuffleOverhead(FixedVectorType *VTy) {
100 unsigned Cost = 0;
101 // Shuffle cost is equal to the cost of extracting each element from its
102 // source vector plus the cost of inserting it into the result vector.
103
104 // e.g. a <4 x float> with a mask of <0,5,2,7>, i.e. we need to extract from
105 // index 0 of the first vector, index 1 of the second vector, index 2 of the
106 // first vector and finally index 3 of the second vector, and insert them at
107 // indices <0,1,2,3> of the result vector.
108 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
109 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
110 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
111 }
112 return Cost;
113 }
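
Similarly, a sketch of the permute overhead above under the same assumed unit costs.

// Generic permute of an N-element vector: one extract/insert pair per lane.
unsigned permuteOverhead(unsigned NumElts, unsigned ExtractCost,
                         unsigned InsertCost) {
  return NumElts * (ExtractCost + InsertCost); // e.g. 4 lanes at unit cost: 4 * 2 = 8
}
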
114
115 /// Estimate a cost of subvector extraction as a sequence of extract and
116 /// insert operations.
117 unsigned getExtractSubvectorOverhead(FixedVectorType *VTy, int Index,
118 FixedVectorType *SubVTy) {
119 assert(VTy && SubVTy &&
120 "Can only extract subvectors from vectors");
121 int NumSubElts = SubVTy->getNumElements();
122 assert((Index + NumSubElts) <= (int)VTy->getNumElements() &&
123 "SK_ExtractSubvector index out of range");
124
125 unsigned Cost = 0;
126 // Subvector extraction cost is equal to the cost of extracting each element
127 // from the source vector type plus the cost of inserting it into the result
128 // subvector type.
129 for (int i = 0; i != NumSubElts; ++i) {
130 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
131 i + Index);
132 Cost +=
133 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
134 }
135 return Cost;
136 }
137
138 /// Estimate a cost of subvector insertion as a sequence of extract and
139 /// insert operations.
140 unsigned getInsertSubvectorOverhead(FixedVectorType *VTy, int Index,
141 FixedVectorType *SubVTy) {
142 assert(VTy && SubVTy &&
143 "Can only insert subvectors into vectors");
144 int NumSubElts = SubVTy->getNumElements();
145 assert((Index + NumSubElts) <= (int)VTy->getNumElements() &&
146 "SK_InsertSubvector index out of range");
147
148 unsigned Cost = 0;
149 // Subvector insertion cost is equal to the cost of extracting each element
150 // from the subvector type plus the cost of inserting it into the result
151 // vector type.
152 for (int i = 0; i != NumSubElts; ++i) {
153 Cost +=
154 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
155 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
156 i + Index);
157 }
158 return Cost;
159 }
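
Both subvector helpers above reduce to the same per-element formula; a sketch under the same assumed unit costs.

// Extracting or inserting an M-element subvector: one extract/insert pair per
// subvector lane, independent of the starting Index.
unsigned subvectorOverhead(unsigned NumSubElts, unsigned ExtractCost,
                           unsigned InsertCost) {
  return NumSubElts * (ExtractCost + InsertCost); // e.g. 2 lanes at unit cost: 4
}
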
160
161 /// Local query method delegates up to T which *must* implement this!
162 const TargetSubtargetInfo *getST() const {
163 return static_cast<const T *>(this)->getST();
164 }
165
166 /// Local query method delegates up to T which *must* implement this!
167 const TargetLoweringBase *getTLI() const {
168 return static_cast<const T *>(this)->getTLI();
169 }
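
The getST()/getTLI()/thisT() hooks rely on the CRTP: the base class statically casts itself to the derived type and calls back into it, so it never has to store those pointers. A minimal standalone sketch of that pattern follows; the names are illustrative, not LLVM's.

template <typename Derived> struct CRTPBase {
  // Delegates to the derived class, mirroring how getST()/getTLI() work above.
  int query() const { return static_cast<const Derived *>(this)->value() + 1; }
};

struct Impl : CRTPBase<Impl> {
  int value() const { return 41; } // plays the role of getST()/getTLI()
};
// Impl{}.query() == 42
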
170
171 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
172 switch (M) {
173 case TTI::MIM_Unindexed:
174 return ISD::UNINDEXED;
175 case TTI::MIM_PreInc:
176 return ISD::PRE_INC;
177 case TTI::MIM_PreDec:
178 return ISD::PRE_DEC;
179 case TTI::MIM_PostInc:
180 return ISD::POST_INC;
181 case TTI::MIM_PostDec:
182 return ISD::POST_DEC;
183 }
184 llvm_unreachable("Unexpected MemIndexedMode");
185 }
186
187protected:
188 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
189 : BaseT(DL) {}
190 virtual ~BasicTTIImplBase() = default;
191
192 using TargetTransformInfoImplBase::DL;
193
194public:
195 /// \name Scalar TTI Implementations
196 /// @{
197 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
198 unsigned AddressSpace, unsigned Alignment,
199 bool *Fast) const {
200 EVT E = EVT::getIntegerVT(Context, BitWidth);
201 return getTLI()->allowsMisalignedMemoryAccesses(
202 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
203 }
204
205 bool hasBranchDivergence() { return false; }
206
207 bool useGPUDivergenceAnalysis() { return false; }
208
209 bool isSourceOfDivergence(const Value *V) { return false; }
210
211 bool isAlwaysUniform(const Value *V) { return false; }
212
213 unsigned getFlatAddressSpace() {
214 // Return an invalid address space.
215 return -1;
216 }
217
218 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
219 Intrinsic::ID IID) const {
220 return false;
221 }
222
223 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
224 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
225 }
226
227 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
228 Value *NewV) const {
229 return nullptr;
230 }
231
232 bool isLegalAddImmediate(int64_t imm) {
233 return getTLI()->isLegalAddImmediate(imm);
234 }
235
236 bool isLegalICmpImmediate(int64_t imm) {
237 return getTLI()->isLegalICmpImmediate(imm);
238 }
239
240 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
241 bool HasBaseReg, int64_t Scale,
242 unsigned AddrSpace, Instruction *I = nullptr) {
243 TargetLoweringBase::AddrMode AM;
244 AM.BaseGV = BaseGV;
245 AM.BaseOffs = BaseOffset;
246 AM.HasBaseReg = HasBaseReg;
247 AM.Scale = Scale;
248 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
249 }
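
For orientation, the parameters forwarded above map onto TargetLoweringBase::AddrMode roughly as shown below; the toy struct and the [base + 4*index + 16] address are illustrative assumptions only.

struct ToyAddrMode {            // mirrors the fields filled in above
  const void *BaseGV = nullptr; // no global symbol used as the base
  long long BaseOffs = 16;      // constant displacement
  bool HasBaseReg = true;       // a base register is present
  long long Scale = 4;          // index register scaled by 4
};
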
250
251 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
252 const DataLayout &DL) const {
253 EVT VT = getTLI()->getValueType(DL, Ty);
254 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
255 }
256
257 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
258 const DataLayout &DL) const {
259 EVT VT = getTLI()->getValueType(DL, Ty);
260 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
261 }
262
263 bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
264 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
265 }
266
267 bool isProfitableLSRChainElement(Instruction *I) {
268 return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
269 }
270
271 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
272 bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
273 TargetLoweringBase::AddrMode AM;
274 AM.BaseGV = BaseGV;
275 AM.BaseOffs = BaseOffset;
276 AM.HasBaseReg = HasBaseReg;
277 AM.Scale = Scale;
278 return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
279 }
280
281 bool isTruncateFree(Type *Ty1, Type *Ty2) {
282 return getTLI()->isTruncateFree(Ty1, Ty2);
283 }
284
285 bool isProfitableToHoist(Instruction *I) {
286 return getTLI()->isProfitableToHoist(I);
287 }
288
289 bool useAA() const { return getST()->useAA(); }
290
291 bool isTypeLegal(Type *Ty) {
292 EVT VT = getTLI()->getValueType(DL, Ty);
293 return getTLI()->isTypeLegal(VT);
294 }
295
296 int getGEPCost(Type *PointeeType, const Value *Ptr,
297 ArrayRef<const Value *> Operands) {
298 return BaseT::getGEPCost(PointeeType, Ptr, Operands);
299 }
300
301 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
302 unsigned &JumpTableSize,
303 ProfileSummaryInfo *PSI,
304 BlockFrequencyInfo *BFI) {
305 /// Try to find the estimated number of clusters. Note that the number of
306 /// clusters identified in this function could be different from the actual
307 /// numbers found in lowering. This function ignores switches that are
308 /// lowered with a mix of jump table / bit test / BTree. It was initially
309 /// intended to be used when estimating the cost of a switch in the inline
310 /// cost heuristic, but it's a generic cost model to be used in other
311 /// places (e.g., in loop unrolling).
312 unsigned N = SI.getNumCases();
313 const TargetLoweringBase *TLI = getTLI();
314 const DataLayout &DL = this->getDataLayout();
315
316 JumpTableSize = 0;
317 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
318
319 // Early exit if neither a jump table nor a bit test is allowed.
320 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
321 return N;
322
323 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
324 APInt MinCaseVal = MaxCaseVal;
325 for (auto CI : SI.cases()) {
326 const APInt &CaseVal = CI.getCaseValue()->getValue();
327 if (CaseVal.sgt(MaxCaseVal))
328 MaxCaseVal = CaseVal;
329 if (CaseVal.slt(MinCaseVal))
330 MinCaseVal = CaseVal;