Bug Summary

File: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 3180, column 20
Called C++ object pointer is null
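
The flagged expression at line 3180, column 20 lies beyond the excerpt reproduced below. For orientation, this diagnostic comes from the core checkers enabled above (-analyzer-checker=core) and is reported whenever a member function is called through an object pointer that is provably null on some feasible path. A minimal illustrative sketch of the pattern, using hypothetical names rather than the actual code from the file:

struct Widget {                       // hypothetical type, for illustration only
  int cost() const { return 1; }
};

int query(const Widget *W, bool HasInfo) {
  const Widget *Active = HasInfo ? W : nullptr;  // Active may be null here
  return Active->cost();  // when HasInfo is false: "Called C++ object pointer is null"
}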

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86TargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-11-29-190409-37574-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

1//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// X86 target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15/// A note about the cost model numbers used below: they correspond to some
16/// "generic" X86 CPU rather than to a concrete CPU model. Usually the numbers
17/// correspond to the CPU where the feature first appeared. For example, if we
18/// do Subtarget.hasSSE42() in the lookups below, the cost is based on Nehalem,
19/// as that was the first CPU to support that feature level and thus most
20/// likely has the worst-case cost.
21/// Some examples of other technologies/CPUs:
22/// SSE 3 - Pentium4 / Athlon64
23/// SSE 4.1 - Penryn
24/// SSE 4.2 - Nehalem
25/// AVX - Sandy Bridge
26/// AVX2 - Haswell
27/// AVX-512 - Xeon Phi / Skylake
28/// And some examples of instruction target dependent costs (latency)
29///                   divss   sqrtss  rsqrtss
30/// AMD K7            11-16   19      3
31/// Piledriver        9-24    13-15   5
32/// Jaguar            14      16      2
33/// Pentium II,III    18      30      2
34/// Nehalem           7-14    7-18    3
35/// Haswell           10-13   11      5
36/// TODO: Develop and implement the target dependent cost model and
37/// specialize cost numbers for different Cost Model Targets such as throughput,
38/// code size, latency and uop count.
39//===----------------------------------------------------------------------===//
40
41#include "X86TargetTransformInfo.h"
42#include "llvm/Analysis/TargetTransformInfo.h"
43#include "llvm/CodeGen/BasicTTIImpl.h"
44#include "llvm/CodeGen/CostTable.h"
45#include "llvm/CodeGen/TargetLowering.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/Support/Debug.h"
48
49using namespace llvm;
50
51#define DEBUG_TYPE "x86tti"
52
53//===----------------------------------------------------------------------===//
54//
55// X86 cost model.
56//
57//===----------------------------------------------------------------------===//
58
59TargetTransformInfo::PopcntSupportKind
60X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62 // TODO: Currently the __builtin_popcount() implementation using SSE3
63 // instructions is inefficient. Once the problem is fixed, we should
64 // call ST->hasSSE3() instead of ST->hasPOPCNT().
65 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66}
67
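
A minimal usage sketch, not part of the listed file: this is roughly how a client queries the hook above through the public TargetTransformInfo wrapper (the TTI reference is assumed to come from the usual TargetIRAnalysis machinery):

#include "llvm/Analysis/TargetTransformInfo.h"

// Returns true when 32-bit popcount is expected to be fast on this target,
// i.e. when the X86 hook above reports PSK_FastHardware (POPCNT available).
static bool popcntIsFast(const llvm::TargetTransformInfo &TTI) {
  return TTI.getPopcntSupport(/*TyWidth=*/32) ==
         llvm::TargetTransformInfo::PSK_FastHardware;
}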
68llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69 TargetTransformInfo::CacheLevel Level) const {
70 switch (Level) {
71 case TargetTransformInfo::CacheLevel::L1D:
72 // - Penryn
73 // - Nehalem
74 // - Westmere
75 // - Sandy Bridge
76 // - Ivy Bridge
77 // - Haswell
78 // - Broadwell
79 // - Skylake
80 // - Kabylake
81 return 32 * 1024; // 32 KByte
82 case TargetTransformInfo::CacheLevel::L2D:
83 // - Penryn
84 // - Nehalem
85 // - Westmere
86 // - Sandy Bridge
87 // - Ivy Bridge
88 // - Haswell
89 // - Broadwell
90 // - Skylake
91 // - Kabylake
92 return 256 * 1024; // 256 KByte
93 }
94
95 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96}
97
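
A short sketch, again outside the listed file, showing that getCacheSize returns llvm::Optional<unsigned> and must therefore be checked before use (the fallback value below is purely illustrative):

#include "llvm/Analysis/TargetTransformInfo.h"

// Query the L1 data cache size through TTI; the X86 implementation above
// answers 32 KiB for L1D, but other targets may return None.
static unsigned l1DataCacheSize(const llvm::TargetTransformInfo &TTI) {
  if (llvm::Optional<unsigned> Size =
          TTI.getCacheSize(llvm::TargetTransformInfo::CacheLevel::L1D))
    return *Size;
  return 16 * 1024;  // illustrative fallback when the target reports nothing
}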
98llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99 TargetTransformInfo::CacheLevel Level) const {
100 // - Penryn
101 // - Nehalem
102 // - Westmere
103 // - Sandy Bridge
104 // - Ivy Bridge
105 // - Haswell
106 // - Broadwell
107 // - Skylake
108 // - Kabylake
109 switch (Level) {
110 case TargetTransformInfo::CacheLevel::L1D:
111 LLVM_FALLTHROUGH;
112 case TargetTransformInfo::CacheLevel::L2D:
113 return 8;
114 }
115
116 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117}
118
119unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120 bool Vector = (ClassID == 1);
121 if (Vector && !ST->hasSSE1())
122 return 0;
123
124 if (ST->is64Bit()) {
125 if (Vector && ST->hasAVX512())
126 return 32;
127 return 16;
128 }
129 return 8;
130}
131
132unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
133 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
134 if (Vector) {
135 if (ST->hasAVX512() && PreferVectorWidth >= 512)
136 return 512;
137 if (ST->hasAVX() && PreferVectorWidth >= 256)
138 return 256;
139 if (ST->hasSSE1() && PreferVectorWidth >= 128)
140 return 128;
141 return 0;
142 }
143
144 if (ST->is64Bit())
145 return 64;
146
147 return 32;
148}
149
150unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
151 return getRegisterBitWidth(true);
152}
153
154unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
155 // If the loop will not be vectorized, don't interleave the loop.
156 // Let the regular unroller unroll the loop, which saves the overflow
157 // check and memory check cost.
158 if (VF == 1)
159 return 1;
160
161 if (ST->isAtom())
162 return 1;
163
164 // Sandybridge and Haswell have multiple execution ports and pipelined
165 // vector units.
166 if (ST->hasAVX())
167 return 4;
168
169 return 2;
170}
171
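
For completeness, a tiny sketch of how a vectorizer-side client consumes the hook above; with this implementation it receives 1 for scalar loops (VF == 1) or Atom cores, 4 when AVX is available, and 2 otherwise (TTI is assumed to be provided by the caller):

#include "llvm/Analysis/TargetTransformInfo.h"

// Ask the target how many times an already-vectorized loop body may be
// interleaved (unroll-and-jam for latency hiding).
static unsigned maxInterleave(const llvm::TargetTransformInfo &TTI, unsigned VF) {
  return TTI.getMaxInterleaveFactor(VF);
}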
172int X86TTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
173 TTI::TargetCostKind CostKind,
174 TTI::OperandValueKind Op1Info,
175 TTI::OperandValueKind Op2Info,
176 TTI::OperandValueProperties Opd1PropInfo,
177 TTI::OperandValueProperties Opd2PropInfo,
178 ArrayRef<const Value *> Args,
179 const Instruction *CxtI) {
180 // TODO: Handle more cost kinds.
181 if (CostKind != TTI::TCK_RecipThroughput)
182 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
183 Op2Info, Opd1PropInfo,
184 Opd2PropInfo, Args, CxtI);
185 // Legalize the type.
186 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
187
188 int ISD = TLI->InstructionOpcodeToISD(Opcode);
189 assert(ISD && "Invalid opcode");
190
191 static const CostTblEntry GLMCostTable[] = {
192 { ISD::FDIV, MVT::f32, 18 }, // divss
193 { ISD::FDIV, MVT::v4f32, 35 }, // divps
194 { ISD::FDIV, MVT::f64, 33 }, // divsd
195 { ISD::FDIV, MVT::v2f64, 65 }, // divpd
196 };
197
198 if (ST->useGLMDivSqrtCosts())
199 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
200 LT.second))
201 return LT.first * Entry->Cost;
202
203 static const CostTblEntry SLMCostTable[] = {
204 { ISD::MUL, MVT::v4i32, 11 }, // pmulld
205 { ISD::MUL, MVT::v8i16, 2 }, // pmullw
206 { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
207 { ISD::FMUL, MVT::f64, 2 }, // mulsd
208 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
209 { ISD::FMUL, MVT::v4f32, 2 }, // mulps
210 { ISD::FDIV, MVT::f32, 17 }, // divss
211 { ISD::FDIV, MVT::v4f32, 39 }, // divps
212 { ISD::FDIV, MVT::f64, 32 }, // divsd
213 { ISD::FDIV, MVT::v2f64, 69 }, // divpd
214 { ISD::FADD, MVT::v2f64, 2 }, // addpd
215 { ISD::FSUB, MVT::v2f64, 2 }, // subpd
216 // v2i64/v4i64 mul is custom lowered as a series of long:
217 // multiplies(3), shifts(3) and adds(2)
218 // slm muldq version throughput is 2 and addq throughput 4
219 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
220 // 2X4 (addq throughput) = 17
221 { ISD::MUL, MVT::v2i64, 17 },
222 // slm addq\subq throughput is 4
223 { ISD::ADD, MVT::v2i64, 4 },
224 { ISD::SUB, MVT::v2i64, 4 },
225 };
226
227 if (ST->isSLM()) {
228 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
229 // Check if the operands can be shrunk into a smaller datatype.
230 bool Op1Signed = false;
231 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
232 bool Op2Signed = false;
233 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
234
235 bool signedMode = Op1Signed | Op2Signed;
236 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
237
238 if (OpMinSize <= 7)
239 return LT.first * 3; // pmullw/sext
240 if (!signedMode && OpMinSize <= 8)
241 return LT.first * 3; // pmullw/zext
242 if (OpMinSize <= 15)
243 return LT.first * 5; // pmullw/pmulhw/pshuf
244 if (!signedMode && OpMinSize <= 16)
245 return LT.first * 5; // pmullw/pmulhw/pshuf
246 }
247
248 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
249 LT.second)) {
250 return LT.first * Entry->Cost;
251 }
252 }
253
254 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
255 ISD == ISD::UREM) &&
256 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
257 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
258 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
259 if (ISD == ISD::SDIV || ISD == ISD::SREM) {
260 // On X86, vector signed division by a power-of-two constant is
261 // normally expanded to the sequence SRA + SRL + ADD + SRA.
262 // The OperandValue properties may not be the same as that of the previous
263 // operation; conservatively assume OP_None.
264 int Cost =
265 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
266 Op2Info,
267 TargetTransformInfo::OP_None,
268 TargetTransformInfo::OP_None);
269 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
270 Op2Info,
271 TargetTransformInfo::OP_None,
272 TargetTransformInfo::OP_None);
273 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
274 Op2Info,
275 TargetTransformInfo::OP_None,
276 TargetTransformInfo::OP_None);
277
278 if (ISD == ISD::SREM) {
279 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
280 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
281 Op2Info);
282 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
283 Op2Info);
284 }
285
286 return Cost;
287 }
288
289 // Vector unsigned division/remainder will be simplified to shifts/masks.
290 if (ISD == ISD::UDIV)
291 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
292 Op1Info, Op2Info,
293 TargetTransformInfo::OP_None,
294 TargetTransformInfo::OP_None);
295
296 else // UREM
297 return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
298 Op1Info, Op2Info,
299 TargetTransformInfo::OP_None,
300 TargetTransformInfo::OP_None);
301 }
302
303 static const CostTblEntry AVX512BWUniformConstCostTable[] = {
304 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
305 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
306 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
307 };
308
309 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
310 ST->hasBWI()) {
311 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
312 LT.second))
313 return LT.first * Entry->Cost;
314 }
315
316 static const CostTblEntry AVX512UniformConstCostTable[] = {
317 { ISD::SRA, MVT::v2i64, 1 },
318 { ISD::SRA, MVT::v4i64, 1 },
319 { ISD::SRA, MVT::v8i64, 1 },
320
321 { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand.
322 { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand.
323 { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb.
324
325 { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
326 { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
327 { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
328 { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
329 };
330
331 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
332 ST->hasAVX512()) {
333 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
334 LT.second))
335 return LT.first * Entry->Cost;
336 }
337
338 static const CostTblEntry AVX2UniformConstCostTable[] = {
339 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
340 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
341 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.
342
343 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
344
345 { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
346 { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
347 { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
348 { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
349 };
350
351 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
352 ST->hasAVX2()) {
353 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
354 LT.second))
355 return LT.first * Entry->Cost;
356 }
357
358 static const CostTblEntry SSE2UniformConstCostTable[] = {
359 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
360 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
361 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
362
363 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
364 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
365 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
366
367 { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
368 { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
369 { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence
370 { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence
371 { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
372 { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
373 { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence
374 { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence
375 };
376
377 // XOP has faster vXi8 shifts.
378 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
379 ST->hasSSE2() && !ST->hasXOP()) {
380 if (const auto *Entry =
381 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
382 return LT.first * Entry->Cost;
383 }
384
385 static const CostTblEntry AVX512BWConstCostTable[] = {
386 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
387 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
388 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
389 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
390 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
391 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence
392 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
393 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence
394 };
395
396 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
397 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
398 ST->hasBWI()) {
399 if (const auto *Entry =
400 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
401 return LT.first * Entry->Cost;
402 }
403
404 static const CostTblEntry AVX512ConstCostTable[] = {
405 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
406 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
407 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
408 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
409 { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
410 { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
411 { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
412 { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
413 { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
414 { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
415 { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
416 { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
417 };
418
419 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
420 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
421 ST->hasAVX512()) {
422 if (const auto *Entry =
423 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
424 return LT.first * Entry->Cost;
425 }
426
427 static const CostTblEntry AVX2ConstCostTable[] = {
428 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
429 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
430 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
431 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
432 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
433 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence
434 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
435 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence
436 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
437 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence
438 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
439 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence
440 };
441
442 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
443 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
444 ST->hasAVX2()) {
445 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
446 return LT.first * Entry->Cost;
447 }
448
449 static const CostTblEntry SSE2ConstCostTable[] = {
450 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
451 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
452 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
453 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
454 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
455 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
456 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
457 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
458 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
459 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
460 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
461 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence
462 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
463 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
464 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
465 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence
466 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split.
467 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split.
468 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
469 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence
470 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split.
471 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split.
472 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
473 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence
474 };
475
476 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
477 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
478 ST->hasSSE2()) {
479 // pmuldq sequence.
480 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
481 return LT.first * 32;
482 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
483 return LT.first * 38;
484 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
485 return LT.first * 15;
486 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
487 return LT.first * 20;
488
489 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
490 return LT.first * Entry->Cost;
491 }
492
493 static const CostTblEntry AVX512BWShiftCostTable[] = {
494 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
495 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
496 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw
497
498 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
499 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
500 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw
501
502 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
503 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
504 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw
505 };
506
507 if (ST->hasBWI())
508 if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
509 return LT.first * Entry->Cost;
510
511 static const CostTblEntry AVX2UniformCostTable[] = {
512 // Uniform splats are cheaper for the following instructions.
513 { ISD::SHL, MVT::v16i16, 1 }, // psllw.
514 { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
515 { ISD::SRA, MVT::v16i16, 1 }, // psraw.
516 { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
517 { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
518 { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.
519 };
520
521 if (ST->hasAVX2() &&
522 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
523 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
524 if (const auto *Entry =
525 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
526 return LT.first * Entry->Cost;
527 }
528
529 static const CostTblEntry SSE2UniformCostTable[] = {
530 // Uniform splats are cheaper for the following instructions.
531 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
532 { ISD::SHL, MVT::v4i32, 1 }, // pslld
533 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
534
535 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
536 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
537 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
538
539 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
540 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
541 };
542
543 if (ST->hasSSE2() &&
544 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
545 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
546 if (const auto *Entry =
547 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
548 return LT.first * Entry->Cost;
549 }
550
551 static const CostTblEntry AVX512DQCostTable[] = {
552 { ISD::MUL, MVT::v2i64, 1 },
553 { ISD::MUL, MVT::v4i64, 1 },
554 { ISD::MUL, MVT::v8i64, 1 }
555 };
556
557 // Look for AVX512DQ lowering tricks for custom cases.
558 if (ST->hasDQI())
559 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
560 return LT.first * Entry->Cost;
561
562 static const CostTblEntry AVX512BWCostTable[] = {
563 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
564 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
565 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
566
567 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence.
568 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence.
569 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence.
570 };
571
572 // Look for AVX512BW lowering tricks for custom cases.
573 if (ST->hasBWI())
574 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
575 return LT.first * Entry->Cost;
576
577 static const CostTblEntry AVX512CostTable[] = {
578 { ISD::SHL, MVT::v16i32, 1 },
579 { ISD::SRL, MVT::v16i32, 1 },
580 { ISD::SRA, MVT::v16i32, 1 },
581
582 { ISD::SHL, MVT::v8i64, 1 },
583 { ISD::SRL, MVT::v8i64, 1 },
584
585 { ISD::SRA, MVT::v2i64, 1 },
586 { ISD::SRA, MVT::v4i64, 1 },
587 { ISD::SRA, MVT::v8i64, 1 },
588
589 { ISD::MUL, MVT::v64i8, 26 }, // extend/pmullw/trunc sequence.
590 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence.
591 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence.
592 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org)
593 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org)
594 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org)
595 { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add
596
597 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
598 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
599 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
600
601 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
602 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
603 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
604 };
605
606 if (ST->hasAVX512())
607 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
608 return LT.first * Entry->Cost;
609
610 static const CostTblEntry AVX2ShiftCostTable[] = {
611 // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
612 // custom, so that we can detect the cases where the shift amount is a scalar.
613 { ISD::SHL, MVT::v4i32, 1 },
614 { ISD::SRL, MVT::v4i32, 1 },
615 { ISD::SRA, MVT::v4i32, 1 },
616 { ISD::SHL, MVT::v8i32, 1 },
617 { ISD::SRL, MVT::v8i32, 1 },
618 { ISD::SRA, MVT::v8i32, 1 },
619 { ISD::SHL, MVT::v2i64, 1 },
620 { ISD::SRL, MVT::v2i64, 1 },
621 { ISD::SHL, MVT::v4i64, 1 },
622 { ISD::SRL, MVT::v4i64, 1 },
623 };
624
625 if (ST->hasAVX512()) {
626 if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
627 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
628 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
629 // On AVX512, a packed v32i16 shift left by a constant build_vector
630 // is lowered into a vector multiply (vpmullw).
631 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
632 Op1Info, Op2Info,
633 TargetTransformInfo::OP_None,
634 TargetTransformInfo::OP_None);
635 }
636
637 // Look for AVX2 lowering tricks.
638 if (ST->hasAVX2()) {
639 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
640 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
641 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
642 // On AVX2, a packed v16i16 shift left by a constant build_vector
643 // is lowered into a vector multiply (vpmullw).
644 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
645 Op1Info, Op2Info,
646 TargetTransformInfo::OP_None,
647 TargetTransformInfo::OP_None);
648
649 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
650 return LT.first * Entry->Cost;
651 }
652
653 static const CostTblEntry XOPShiftCostTable[] = {
654 // 128bit shifts take 1cy, but right shifts require negation beforehand.
655 { ISD::SHL, MVT::v16i8, 1 },
656 { ISD::SRL, MVT::v16i8, 2 },
657 { ISD::SRA, MVT::v16i8, 2 },
658 { ISD::SHL, MVT::v8i16, 1 },
659 { ISD::SRL, MVT::v8i16, 2 },
660 { ISD::SRA, MVT::v8i16, 2 },
661 { ISD::SHL, MVT::v4i32, 1 },
662 { ISD::SRL, MVT::v4i32, 2 },
663 { ISD::SRA, MVT::v4i32, 2 },
664 { ISD::SHL, MVT::v2i64, 1 },
665 { ISD::SRL, MVT::v2i64, 2 },
666 { ISD::SRA, MVT::v2i64, 2 },
667 // 256bit shifts require splitting if AVX2 didn't catch them above.
668 { ISD::SHL, MVT::v32i8, 2+2 },
669 { ISD::SRL, MVT::v32i8, 4+2 },
670 { ISD::SRA, MVT::v32i8, 4+2 },
671 { ISD::SHL, MVT::v16i16, 2+2 },
672 { ISD::SRL, MVT::v16i16, 4+2 },
673 { ISD::SRA, MVT::v16i16, 4+2 },
674 { ISD::SHL, MVT::v8i32, 2+2 },
675 { ISD::SRL, MVT::v8i32, 4+2 },
676 { ISD::SRA, MVT::v8i32, 4+2 },
677 { ISD::SHL, MVT::v4i64, 2+2 },
678 { ISD::SRL, MVT::v4i64, 4+2 },
679 { ISD::SRA, MVT::v4i64, 4+2 },
680 };
681
682 // Look for XOP lowering tricks.
683 if (ST->hasXOP()) {
684 // If the right shift is constant then we'll fold the negation so
685 // it's as cheap as a left shift.
686 int ShiftISD = ISD;
687 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
688 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
689 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
690 ShiftISD = ISD::SHL;
691 if (const auto *Entry =
692 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
693 return LT.first * Entry->Cost;
694 }
695
696 static const CostTblEntry SSE2UniformShiftCostTable[] = {
697 // Uniform splats are cheaper for the following instructions.
698 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
699 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split.
700 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split.
701
702 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
703 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split.
704 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split.
705
706 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
707 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split.
708 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle.
709 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split.
710 };
711
712 if (ST->hasSSE2() &&
713 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
714 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
715
716 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
717 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
718 return LT.first * 4; // 2*psrad + shuffle.
719
720 if (const auto *Entry =
721 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
722 return LT.first * Entry->Cost;
723 }
724
725 if (ISD == ISD::SHL &&
726 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
727 MVT VT = LT.second;
728 // Vector shift left by a non-uniform constant can be lowered
729 // into vector multiply.
730 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
731 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
732 ISD = ISD::MUL;
733 }
734
735 static const CostTblEntry AVX2CostTable[] = {
736 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
737 { ISD::SHL, MVT::v64i8, 22 }, // 2*vpblendvb sequence.
738 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
739 { ISD::SHL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.
740
741 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
742 { ISD::SRL, MVT::v64i8, 22 }, // 2*vpblendvb sequence.
743 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
744 { ISD::SRL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.
745
746 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
747 { ISD::SRA, MVT::v64i8, 48 }, // 2*vpblendvb sequence.
748 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
749 { ISD::SRA, MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence.
750 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
751 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence.
752
753 { ISD::SUB, MVT::v32i8, 1 }, // psubb
754 { ISD::ADD, MVT::v32i8, 1 }, // paddb
755 { ISD::SUB, MVT::v16i16, 1 }, // psubw
756 { ISD::ADD, MVT::v16i16, 1 }, // paddw
757 { ISD::SUB, MVT::v8i32, 1 }, // psubd
758 { ISD::ADD, MVT::v8i32, 1 }, // paddd
759 { ISD::SUB, MVT::v4i64, 1 }, // psubq
760 { ISD::ADD, MVT::v4i64, 1 }, // paddq
761
762 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence.
763 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence.
764 { ISD::MUL, MVT::v16i16, 1 }, // pmullw
765 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org)
766 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add
767
768 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
769 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
770 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
771 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
772 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
773 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
774
775 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
776 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
777 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
778 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
779 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
780 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
781 };
782
783 // Look for AVX2 lowering tricks for custom cases.
784 if (ST->hasAVX2())
785 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
786 return LT.first * Entry->Cost;
787
788 static const CostTblEntry AVX1CostTable[] = {
789 // We don't have to scalarize unsupported ops. We can issue two half-sized
790 // operations and we only need to extract the upper YMM half.
791 // Two ops + 1 extract + 1 insert = 4.
792 { ISD::MUL, MVT::v16i16, 4 },
793 { ISD::MUL, MVT::v8i32, 4 },
794 { ISD::SUB, MVT::v32i8, 4 },
795 { ISD::ADD, MVT::v32i8, 4 },
796 { ISD::SUB, MVT::v16i16, 4 },
797 { ISD::ADD, MVT::v16i16, 4 },
798 { ISD::SUB, MVT::v8i32, 4 },
799 { ISD::ADD, MVT::v8i32, 4 },
800 { ISD::SUB, MVT::v4i64, 4 },
801 { ISD::ADD, MVT::v4i64, 4 },
802
803 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
804 // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
805 // Because we believe v4i64 to be a legal type, we must also include the
806 // extract+insert in the cost table. Therefore, the cost here is 18
807 // instead of 8.
808 { ISD::MUL, MVT::v4i64, 18 },
809
810 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence.
811
812 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
813 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
814 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
815 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
816 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
817 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/
818 };
819
820 if (ST->hasAVX())
821 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
822 return LT.first * Entry->Cost;
823
824 static const CostTblEntry SSE42CostTable[] = {
825 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
826 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
827 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
828 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
829
830 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
831 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/
832 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
833 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
834
835 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
836 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
837 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
838 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
839
840 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
841 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
842 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
843 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
844 };
845
846 if (ST->hasSSE42())
847 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
848 return LT.first * Entry->Cost;
849
850 static const CostTblEntry SSE41CostTable[] = {
851 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence.
852 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split.
853 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence.
854 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
855 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
856 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split
857
858 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence.
859 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split.
860 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence.
861 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
862 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend.
863 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split.
864
865 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence.
866 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split.
867 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence.
868 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
869 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
870 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split.
871
872 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org)
873 };
874
875 if (ST->hasSSE41())
876 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
877 return LT.first * Entry->Cost;
878
879 static const CostTblEntry SSE2CostTable[] = {
880 // We don't correctly identify costs of casts because they are marked as
881 // custom.
882 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
883 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
884 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
885 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
886 { ISD::SHL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
887
888 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
889 { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
890 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
891 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
892 { ISD::SRL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
893
894 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
895 { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
896 { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
897 { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence.
898 { ISD::SRA, MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.
899
900 { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence.
901 { ISD::MUL, MVT::v8i16, 1 }, // pmullw
902 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
903 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add
904
905 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
906 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
907 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
908 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
909
910 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
911 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
912
913 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
914 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
915 };
916
917 if (ST->hasSSE2())
918 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
919 return LT.first * Entry->Cost;
920
921 static const CostTblEntry SSE1CostTable[] = {
922 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
923 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
924
925 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
926 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
927
928 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
929 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
930
931 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
932 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
933 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
934
935 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
936 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
937 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
938 };
939
940 if (ST->hasSSE1())
941 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
942 return LT.first * Entry->Cost;
943
944 // It is not a good idea to vectorize division. We have to scalarize it and
945 // in the process we will often end up having to spill regular
946 // registers. The overhead of division is going to dominate most kernels
947 // anyway, so try hard to prevent vectorization of division - it is
948 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
949 // to hide "20 cycles" for each lane.
950 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
951 ISD == ISD::UDIV || ISD == ISD::UREM)) {
952 int ScalarCost = getArithmeticInstrCost(
953 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
954 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
955 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
956 }
957
958 // Fallback to the default implementation.
959 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
960}
961
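
A worked example, not part of the listed file, tracing the routine above: on a target with only SSE2, an fdiv on <8 x float> legalizes to two v4f32 halves (LT.first == 2), and the SSE2 table prices v4f32 FDIV at 39, so the reported reciprocal-throughput cost is 2 * 39 = 78. A sketch of the same query made through the public interface (Ctx and TTI are assumed to be set up elsewhere):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"

// Reciprocal-throughput cost of an 8-wide float division on this target.
static int fdivV8F32Cost(const llvm::TargetTransformInfo &TTI,
                         llvm::LLVMContext &Ctx) {
  auto *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getFloatTy(Ctx), /*NumElts=*/8);
  return TTI.getArithmeticInstrCost(
      llvm::Instruction::FDiv, VecTy,
      llvm::TargetTransformInfo::TCK_RecipThroughput);
}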
962int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *BaseTp,
963 int Index, VectorType *SubTp) {
964 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
965 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
966 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
967
968 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
969 if (Kind == TTI::SK_Transpose)
970 Kind = TTI::SK_PermuteTwoSrc;
971
972 // For Broadcasts we are splatting the first element from the first input
973 // register, so only need to reference that input and all the output
974 // registers are the same.
975 if (Kind == TTI::SK_Broadcast)
976 LT.first = 1;
977
978 // Subvector extractions are free if they start at the beginning of a
979 // vector and cheap if the subvectors are aligned.
980 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
981 int NumElts = LT.second.getVectorNumElements();
982 if ((Index % NumElts) == 0)
983 return 0;
984 std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
985 if (SubLT.second.isVector()) {
986 int NumSubElts = SubLT.second.getVectorNumElements();
987 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
988 return SubLT.first;
989 // Handle some cases for widening legalization. For now we only handle
990 // cases where the original subvector was naturally aligned and evenly
991 // fit in its legalized subvector type.
992 // FIXME: Remove some of the alignment restrictions.
993 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
994 // vectors.
995 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
996 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
997 (NumSubElts % OrigSubElts) == 0 &&
998 LT.second.getVectorElementType() ==
999 SubLT.second.getVectorElementType() &&
1000 LT.second.getVectorElementType().getSizeInBits() ==
1001 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1002 assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1003 "Unexpected number of elements!");
1004 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1005 LT.second.getVectorNumElements());
1006 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1007 SubLT.second.getVectorNumElements());
1008 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1009 int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
1010 ExtractIndex, SubTy);
1011
1012 // If the original size is 32-bits or more, we can use pshufd. Otherwise
1013 // if we have SSSE3 we can use pshufb.
1014 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1015 return ExtractCost + 1; // pshufd or pshufb
1016
1017 assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1018 "Unexpected vector size");
1019
1020 return ExtractCost + 2; // worst case pshufhw + pshufd
1021 }
1022 }
1023 }
1024
1025 // Handle some common (illegal) sub-vector types as they are often very cheap
1026 // to shuffle even on targets without PSHUFB.
1027 EVT VT = TLI->getValueType(DL, BaseTp);
1028 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1029 !ST->hasSSSE3()) {
1030 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1031 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1032 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1033 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1034 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1035 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1036
1037 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1038 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1039 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1040 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1041
1042 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1043 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1044 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1045 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1046 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1047
1048 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1049 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1050 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1051 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1052 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1053 };
1054
1055 if (ST->hasSSE2())
1056 if (const auto *Entry =
1057 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1058 return Entry->Cost;
1059 }
1060
1061 // We are going to permute multiple sources and the result will be in multiple
1062 // destinations. We provide an accurate cost only for splits where the element
1063 // type remains the same.
1064 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1065 MVT LegalVT = LT.second;
1066 if (LegalVT.isVector() &&
1067 LegalVT.getVectorElementType().getSizeInBits() ==
1068 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1069 LegalVT.getVectorNumElements() <
1070 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1071
1072 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1073 unsigned LegalVTSize = LegalVT.getStoreSize();
1074 // Number of source vectors after legalization:
1075 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1076 // Number of destination vectors after legalization:
1077 unsigned NumOfDests = LT.first;
1078
1079 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1080 LegalVT.getVectorNumElements());
1081
1082 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1083 return NumOfShuffles *
1084 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
1085 }
1086
1087 return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
1088 }
1089
1090 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1091 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1092 // We assume that source and destination have the same vector type.
1093 int NumOfDests = LT.first;
1094 int NumOfShufflesPerDest = LT.first * 2 - 1;
1095 LT.first = NumOfDests * NumOfShufflesPerDest;
1096 }
1097
1098 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1099 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1100 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1101
1102 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1103 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1104
1105 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1106 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1107 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1108 };
1109
1110 if (ST->hasVBMI())
1111 if (const auto *Entry =
1112 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1113 return LT.first * Entry->Cost;
1114
1115 static const CostTblEntry AVX512BWShuffleTbl[] = {
1116 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1117 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1118
1119 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1120 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1121 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1122
1123 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1124 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1125 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1126
1127 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1128 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1129 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1130 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1131
1132 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1133 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1134 };
1135
1136 if (ST->hasBWI())
1137 if (const auto *Entry =
1138 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1139 return LT.first * Entry->Cost;
1140
1141 static const CostTblEntry AVX512ShuffleTbl[] = {
1142 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1143 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1144 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1145 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1146 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1147 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1148
1149 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1150 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1151 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1152 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1153
1154 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1155 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1156 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1157 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1158 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1159 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1160 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1161 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1162 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1163 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1164 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1165 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1166 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1167
1168 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1169 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1170 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1171 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1172 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1173 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1174 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1175 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1176 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1177 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1178 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1179 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1180
1181 // FIXME: This just applies the type legalization cost rules above
1182 // assuming these completely split.
1183 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1184 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1185 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1186 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1187
1188 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1189 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq
1190 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd
1191 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1192 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq
1193 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1194 };
1195
1196 if (ST->hasAVX512())
1197 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1198 return LT.first * Entry->Cost;
1199
1200 static const CostTblEntry AVX2ShuffleTbl[] = {
1201 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1202 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1203 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1204 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1205 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1206 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1207
1208 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1209 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1210 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1211 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1212 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1213 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1214
1215 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1216 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1217
1218 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1219 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1220 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1221 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1222 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1223 // + vpblendvb
1224 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1225 // + vpblendvb
1226
1227 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1228 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1229 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1230 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1231 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1232 // + vpblendvb
1233 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1234 // + vpblendvb
1235 };
1236
1237 if (ST->hasAVX2())
1238 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1239 return LT.first * Entry->Cost;
1240
1241 static const CostTblEntry XOPShuffleTbl[] = {
1242 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1243 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1244 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1245 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1246 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1247 // + vinsertf128
1248 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1249 // + vinsertf128
1250
1251 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1252 // + vinsertf128
1253 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1254 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1255 // + vinsertf128
1256 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1257 };
1258
1259 if (ST->hasXOP())
1260 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1261 return LT.first * Entry->Cost;
1262
1263 static const CostTblEntry AVX1ShuffleTbl[] = {
1264 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1265 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1266 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1267 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1268 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1269 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1270
1271 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1272 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1273 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1274 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1275 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1276 // + vinsertf128
1277 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1278 // + vinsertf128
1279
1280 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1281 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1282 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1283 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1284 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1285 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1286
1287 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1288 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1289 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1290 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1291 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1292 // + 2*por + vinsertf128
1293 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1294 // + 2*por + vinsertf128
1295
1296 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1297 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1298 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1299 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1300 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1301 // + 4*por + vinsertf128
1302 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1303 // + 4*por + vinsertf128
1304 };
1305
1306 if (ST->hasAVX())
1307 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1308 return LT.first * Entry->Cost;
1309
1310 static const CostTblEntry SSE41ShuffleTbl[] = {
1311 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1312 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1313 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1314 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1315 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1316 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1317 };
1318
1319 if (ST->hasSSE41())
1320 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1321 return LT.first * Entry->Cost;
1322
1323 static const CostTblEntry SSSE3ShuffleTbl[] = {
1324 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1325 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1326
1327 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1328 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1329
1330 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1331 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1332
1333 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1334 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1335
1336 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1337 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1338 };
1339
1340 if (ST->hasSSSE3())
1341 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1342 return LT.first * Entry->Cost;
1343
1344 static const CostTblEntry SSE2ShuffleTbl[] = {
1345 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1346 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1347 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1348 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1349 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1350
1351 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1352 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1353 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1354 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1355 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1356 // + 2*pshufd + 2*unpck + packus
1357
1358 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1359 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1360 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1361 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1362 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1363
1364 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1365 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1366 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1367 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1368 // + pshufd/unpck
1369 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1370 // + 2*pshufd + 2*unpck + 2*packus
1371
1372 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
1373 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
1374 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
1375 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
1376 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
1377 };
1378
1379 if (ST->hasSSE2())
1380 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1381 return LT.first * Entry->Cost;
1382
1383 static const CostTblEntry SSE1ShuffleTbl[] = {
1384 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
1385 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
1386 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
1387 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1388 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
1389 };
1390
1391 if (ST->hasSSE1())
1392 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1393 return LT.first * Entry->Cost;
1394
1395 return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
1396}
1397
1398int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1399 TTI::CastContextHint CCH,
1400 TTI::TargetCostKind CostKind,
1401 const Instruction *I) {
1402 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1403 assert(ISD && "Invalid opcode");
1404
1405 // TODO: Allow non-throughput costs that aren't binary.
1406 auto AdjustCost = [&CostKind](int Cost) {
1407 if (CostKind != TTI::TCK_RecipThroughput)
1408 return Cost == 0 ? 0 : 1;
1409 return Cost;
1410 };
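
A short, self-contained sketch of what the AdjustCost lambda above does: reciprocal-throughput costs pass through unchanged, while every other cost kind is collapsed to a 0/1 answer (the enum below is a stand-in for illustration, not TTI::TargetCostKind):

#include <cassert>

enum CostKind { RecipThroughput, CodeSize, Latency };

static int adjustCost(CostKind CK, int Cost) {
  if (CK != RecipThroughput)
    return Cost == 0 ? 0 : 1; // binary answer for non-throughput queries
  return Cost;
}

int main() {
  assert(adjustCost(RecipThroughput, 5) == 5);
  assert(adjustCost(CodeSize, 5) == 1);
  assert(adjustCost(Latency, 0) == 0);
  return 0;
}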
1411
1412 // FIXME: Need a better design of the cost table to handle non-simple types and
1413 // the potentially massive number of combinations (elem_num x src_type x dst_type).
1414
1415 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1416 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1417 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1418
1419 // Mask sign extend has an instruction.
1420 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1421 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1422 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1423 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1424 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1425 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1426 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1427 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1428 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1429 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1430 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
1431
1432 // Mask zero extend is a sext + shift.
1433 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1434 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1435 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1436 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1437 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1438 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1439 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1440 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1441 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1442 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1443 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
1444
1445 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
1446 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
1447 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
1448 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
1449 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
1450 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
1451 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
1452 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
1453 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
1454 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
1455 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
1456 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
1457 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
1458 };
1459
1460 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1461 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1462 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1463
1464 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1465 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1466
1467 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
1468 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
1469
1470 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
1471 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
1472 };
1473
1474 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1475 // 256-bit wide vectors.
1476
1477 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1478 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
1479 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
1480 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
1481
1482 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1483 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1484 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1485 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
1486 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1487 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1488 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1489 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1490 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
1491 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
1492 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
1493 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
1494 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
1495 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
1496 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
1497 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 },
1498 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 },
1499 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 },
1500 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 },
1501 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
1502 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
1503 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1504
1505 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
1506 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
1507
1508 // Sign extend is zmm vpternlogd+vptruncdb.
1509 // Zero extend is zmm broadcast load+vptruncdw.
1510 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
1511 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
1512 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
1513 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
1514 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
1515 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
1516 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
1517 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
1518
1519 // Sign extend is zmm vpternlogd+vptruncdw.
1520 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1521 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
1522 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1523 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
1524 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1525 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
1526 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1527 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
1528 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1529
1530 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
1531 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
1532 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
1533 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
1534 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
1535 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
1536 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
1537 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
1538 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
1539 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
1540
1541 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
1542 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
1543 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
1544 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
1545
1546 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1547 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1548 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1549 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1550 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1551 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1552 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1553 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1554 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1555 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1556
1557 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1558 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1559
1560 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1561 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1562 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1563 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1564 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1565 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1566 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1567 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1568
1569 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1570 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1571 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1572 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1573 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1574 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1575 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1576 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1577 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
1578 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
1579
1580 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f64, 3 },
1581 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
1582 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 3 },
1583 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 3 },
1584
1585 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1586 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
1587 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
1588 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
1589 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
1590 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
1591 };
1592
1593 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1594 // Mask sign extend has an instruction.
1595 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1596 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1597 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1598 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1599 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1600 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1601 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1602 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1603 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1604
1605 // Mask zero extend is a sext + shift.
1606 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1607 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1608 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1609 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1610 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1611 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1612 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1613 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1614 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1615
1616 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
1617 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
1618 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
1619 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
1620 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
1621 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
1622 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
1623 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
1624 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
1625 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
1626 };
1627
1628 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1629 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1630 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1631 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1632 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1633
1634 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1635 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1636 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1637 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1638
1639 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
1640 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
1641 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1642 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
1643
1644 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
1645 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
1646 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1647 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
1648 };
1649
1650 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1651 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1652 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1653 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1654 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
1655 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1656 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1657 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1658 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
1659 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
1660 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
1661 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
1662 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
1663 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
1664 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
1665
1666 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1667 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1668 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
1669 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
1670 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
1671 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
1672 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
1673 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
1674 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
1675 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
1676
1677 // sign extend is vpcmpeq+maskedmove+vpmovdw
1678 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1679 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1680 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
1681 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1682 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
1683 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1684 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
1685 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1686 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1687
1688 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
1689 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
1690 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
1691 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
1692 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
1693 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
1694 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
1695 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
1696 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
1697 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
1698
1699 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
1700 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1701 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
1702 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
1703 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1704 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1705 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
1706 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
1707 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1708 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1709 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1710 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
1711 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1712 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
1713
1714 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
1715 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
1716
1717 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 3 },
1718 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 3 },
1719
1720 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
1721 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
1722
1723 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1724 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1725 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 1 },
1726 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
1727 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
1728 };
1729
1730 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1731 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1732 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1733 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1734 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1735 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
1736 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
1737 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
1738 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
1739 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1740 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1741 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1742 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1743 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
1744 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
1745 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1746 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1747 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1748 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1749 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1750 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1751
1752 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1753 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
1754
1755 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
1756 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
1757 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
1758 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
1759
1760 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
1761 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
1762
1763 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
1764 };
1765
1766 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1767 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1768 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1769 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1770 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1771 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1772 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1773 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1774 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1775 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1776 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1777 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1778 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1779 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 4 },
1780 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1781 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1782 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1783 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1784 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1785
1786 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
1787 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
1788 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
1789 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
1790 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
1791
1792 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
1793 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1794 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1795 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
1796 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
1797 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1798 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 11 },
1799 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 9 },
1800 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
1801 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 11 },
1802
1803 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
1804 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
1805 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
1806 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
1807 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
1808 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
1809 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
1810 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
1811 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1812 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1813 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1814 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1815
1816 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
1817 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
1818 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
1819 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
1820 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1821 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
1822 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1823 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1824 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1825 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
1826 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
1827 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
1828 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
1829 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1830 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 },
1831 // The generic code to compute the scalar overhead is currently broken.
1832 // Work around this limitation by estimating the scalarization overhead
1833 // here. We have roughly 10 instructions per scalar element.
1834 // Multiply that by the vector width.
1835 // FIXME: remove this when PR19268 is fixed.
1836 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1837 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1838
1839 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 4 },
1840 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f64, 3 },
1841 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f64, 2 },
1842 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 3 },
1843
1844 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f64, 3 },
1845 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f64, 2 },
1846 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 4 },
1847 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 3 },
1848 // This node is expanded into scalarized operations but BasicTTI is overly
1849 // optimistic in estimating its cost. It computes 3 per element (one
1850 // vector-extract, one scalar conversion and one vector-insert). The
1851 // problem is that the inserts form a read-modify-write chain, so latency
1852 // should be factored in too. Inflate the cost per element by 1.
1853 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
1854 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
1855
1856 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
1857 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
1858 };
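
The two inflated FP_TO_UINT entries near the end of the AVX table follow directly from the comment's arithmetic: 3 operations per element (extract, convert, insert) plus the extra 1 for the insert chain's read-modify-write latency, multiplied by the lane count. A trivial compile-time check of that arithmetic:

// v8f32 -> v8i32: 8 lanes x (3 ops + 1 latency fudge) = 32 = 8*4.
static_assert(8 * (3 + 1) == 8 * 4, "v8i32 <- v8f32 entry");
// v4f64 -> v4i32: 4 lanes x (3 ops + 1 latency fudge) = 16 = 4*4.
static_assert(4 * (3 + 1) == 4 * 4, "v4i32 <- v4f64 entry");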
1859
1860 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1861 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1862 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1863 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1864 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1865 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1866 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1867
1868 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1869 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
1870 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1871 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1872 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1873 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1874 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1875 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1876 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1877 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1878 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1879 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1880 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1881 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1882 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1883 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1884 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1885 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1886
1887 // These truncates end up widening elements.
1888 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ
1889 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
1890 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD
1891
1892 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 1 },
1893 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 1 },
1894 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
1895 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
1896 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
1897 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
1898 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
1899 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
1900 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1 }, // PSHUFB
1901
1902 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
1903 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
1904
1905 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 3 },
1906 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 3 },
1907
1908 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 3 },
1909 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 3 },
1910 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
1911 };
1912
1913 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
1914 // These are somewhat magic numbers justified by looking at the output of
1915 // Intel's IACA, running some kernels, and making sure that, once
1916 // legalization is taken into account, the throughput is overestimated.
1917 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1918 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1919 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1920 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1921 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
1922 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
1923 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
1924 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1925 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
1926
1927 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1928 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1929 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1930 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1931 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1932 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
1933 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
1934 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1935
1936 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 4 },
1937 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 },
1938 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
1939 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
1940 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
1941 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 4 },
1942
1943 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 },
1944
1945 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 },
1946 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },
1947
1948 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
1949 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
1950 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 4 },
1951 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 4 },
1952 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
1953 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 },
1954 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
1955 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 },
1956
1957 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1958 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
1959 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
1960 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
1961 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1962 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
1963 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1964 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
1965 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1966 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1967 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1968 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1969 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
1970 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
1971 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1972 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
1973 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1974 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
1975 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1976 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1977 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
1978 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
1979 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1980 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
1981
1982 // These truncates are really widening elements.
1983 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
1984 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
1985 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
1986 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
1987 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
1988 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
1989
1990 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // PAND+PACKUSWB
1991 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // PAND+PACKUSWB
1992 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
1993 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
1994 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 3 }, // PAND+2*PACKUSWB
1995 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
1996 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
1997 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
1998 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1999 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
2000 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2001 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
2002 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
2003 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
2004 { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1 }, // PSHUFD
2005 };
2006
2007 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2008 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
2009
2010 if (ST->hasSSE2() && !ST->hasAVX()) {
2011 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2012 LTDest.second, LTSrc.second))
2013 return AdjustCost(LTSrc.first * Entry->Cost);
2014 }
2015
2016 EVT SrcTy = TLI->getValueType(DL, Src);
2017 EVT DstTy = TLI->getValueType(DL, Dst);
2018
2019 // The function getSimpleVT only handles simple value types.
2020 if (!SrcTy.isSimple() || !DstTy.isSimple())
2021 return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind));
2022
2023 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2024 MVT SimpleDstTy = DstTy.getSimpleVT();
2025
2026 if (ST->useAVX512Regs()) {
2027 if (ST->hasBWI())
2028 if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
2029 SimpleDstTy, SimpleSrcTy))
2030 return AdjustCost(Entry->Cost);
2031
2032 if (ST->hasDQI())
2033 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
2034 SimpleDstTy, SimpleSrcTy))
2035 return AdjustCost(Entry->Cost);
2036
2037 if (ST->hasAVX512())
2038 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
2039 SimpleDstTy, SimpleSrcTy))
2040 return AdjustCost(Entry->Cost);
2041 }
2042
2043 if (ST->hasBWI())
2044 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2045 SimpleDstTy, SimpleSrcTy))
2046 return AdjustCost(Entry->Cost);
2047
2048 if (ST->hasDQI())
2049 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2050 SimpleDstTy, SimpleSrcTy))
2051 return AdjustCost(Entry->Cost);
2052
2053 if (ST->hasAVX512())
2054 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2055 SimpleDstTy, SimpleSrcTy))
2056 return AdjustCost(Entry->Cost);
2057
2058 if (ST->hasAVX2()) {
2059 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2060 SimpleDstTy, SimpleSrcTy))
2061 return AdjustCost(Entry->Cost);
2062 }
2063
2064 if (ST->hasAVX()) {
2065 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2066 SimpleDstTy, SimpleSrcTy))
2067 return AdjustCost(Entry->Cost);
2068 }
2069
2070 if (ST->hasSSE41()) {
2071 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2072 SimpleDstTy, SimpleSrcTy))
2073 return AdjustCost(Entry->Cost);
2074 }
2075
2076 if (ST->hasSSE2()) {
2077 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2078 SimpleDstTy, SimpleSrcTy))
2079 return AdjustCost(Entry->Cost);
2080 }
2081
2082 return AdjustCost(
2083 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2084}
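
The chain of lookups above always consults the most specific feature table first and only falls through on a miss, so a subtarget with more features can get a strictly cheaper answer for the same cast. Here is a self-contained sketch using the uint-to-fp v8i64 -> v8f64 rows from the AVX512DQ and AVX512F tables (the struct and function names are illustrative, not the LLVM API):

#include <cstdio>

struct CpuFeatures { bool HasDQI; bool HasAVX512F; };

// Cost of uitofp <8 x i64> -> <8 x double>, mirroring the two table rows:
// AVX512DQ converts in one instruction (cost 1); plain AVX-512F needs the
// longer expansion (cost 5).
static int uintToFpV8i64ToV8f64Cost(CpuFeatures ST) {
  if (ST.HasDQI)
    return 1;  // AVX512DQConversionTbl hit
  if (ST.HasAVX512F)
    return 5;  // AVX512FConversionTbl hit
  return -1;   // would keep falling through to the AVX2/AVX/SSE tables
}

int main() {
  std::printf("%d\n", uintToFpV8i64ToV8f64Cost({true, true}));   // 1
  std::printf("%d\n", uintToFpV8i64ToV8f64Cost({false, true}));  // 5
  return 0;
}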
2085
2086int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
2087 CmpInst::Predicate VecPred,
2088 TTI::TargetCostKind CostKind,
2089 const Instruction *I) {
2090 // TODO: Handle other cost kinds.
2091 if (CostKind != TTI::TCK_RecipThroughput)
2092 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2093 I);
2094
2095 // Legalize the type.
2096 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2097
2098 MVT MTy = LT.second;
2099
2100 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2101 assert(ISD && "Invalid opcode");
2102
2103 unsigned ExtraCost = 0;
2104 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2105 // Some vector comparison predicates cost extra instructions.
2106 if (MTy.isVector() &&
2107 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2108 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2109 ST->hasBWI())) {
2110 switch (cast<CmpInst>(I)->getPredicate()) {
2111 case CmpInst::Predicate::ICMP_NE:
2112 // xor(cmpeq(x,y),-1)
2113 ExtraCost = 1;
2114 break;
2115 case CmpInst::Predicate::ICMP_SGE:
2116 case CmpInst::Predicate::ICMP_SLE:
2117 // xor(cmpgt(x,y),-1)
2118 ExtraCost = 1;
2119 break;
2120 case CmpInst::Predicate::ICMP_ULT:
2121 case CmpInst::Predicate::ICMP_UGT:
2122 // cmpgt(xor(x,signbit),xor(y,signbit))
2123 // xor(cmpeq(pmaxu(x,y),x),-1)
2124 ExtraCost = 2;
2125 break;
2126 case CmpInst::Predicate::ICMP_ULE:
2127 case CmpInst::Predicate::ICMP_UGE:
2128 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2129 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2130 // cmpeq(psubus(x,y),0)
2131 // cmpeq(pminu(x,y),x)
2132 ExtraCost = 1;
2133 } else {
2134 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2135 ExtraCost = 3;
2136 }
2137 break;
2138 default:
2139 break;
2140 }
2141 }
2142 }
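
For the unsigned predicates handled just above, the ExtraCost of 2 corresponds to the sign-bit-flip trick in the comment: on targets with only signed vector compares, both operands are XORed with the sign-bit pattern and then compared with the signed instruction. A minimal SSE2 intrinsics sketch for 16 x i8 operands (illustrative, not the exact code the backend emits):

#include <emmintrin.h>
#include <cstdio>

// icmp ugt <16 x i8> %x, %y when only the signed pcmpgtb is available:
// flip the sign bit of both operands, then compare signed. The two extra
// xors on top of the baseline compare account for ExtraCost = 2.
static __m128i cmpgt_u8(__m128i X, __m128i Y) {
  const __m128i SignBit = _mm_set1_epi8((char)0x80);
  return _mm_cmpgt_epi8(_mm_xor_si128(X, SignBit), _mm_xor_si128(Y, SignBit));
}

int main() {
  __m128i A = _mm_set1_epi8((char)0xFF); // 255 in every unsigned byte lane
  __m128i B = _mm_set1_epi8(1);
  // Every lane satisfies 255 u> 1, so the lane mask is 0xffff.
  std::printf("%#x\n", _mm_movemask_epi8(cmpgt_u8(A, B)));
  return 0;
}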
2143
2144 static const CostTblEntry SLMCostTbl[] = {
2145 // slm pcmpeq/pcmpgt throughput is 2
2146 { ISD::SETCC, MVT::v2i64, 2 },
2147 };
2148
2149 static const CostTblEntry AVX512BWCostTbl[] = {
2150 { ISD::SETCC, MVT::v32i16, 1 },
2151 { ISD::SETCC, MVT::v64i8, 1 },
2152
2153 { ISD::SELECT, MVT::v32i16, 1 },
2154 { ISD::SELECT, MVT::v64i8, 1 },
2155 };
2156
2157 static const CostTblEntry AVX512CostTbl[] = {
2158 { ISD::SETCC, MVT::v8i64, 1 },
2159 { ISD::SETCC, MVT::v16i32, 1 },
2160 { ISD::SETCC, MVT::v8f64, 1 },
2161 { ISD::SETCC, MVT::v16f32, 1 },
2162
2163 { ISD::SELECT, MVT::v8i64, 1 },
2164 { ISD::SELECT, MVT::v16i32, 1 },
2165 { ISD::SELECT, MVT::v8f64, 1 },
2166 { ISD::SELECT, MVT::v16f32, 1 },
2167
2168 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
2169 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4
2170
2171 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
2172 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
2173 };
2174
2175 static const CostTblEntry AVX2CostTbl[] = {
2176 { ISD::SETCC, MVT::v4i64, 1 },
2177 { ISD::SETCC, MVT::v8i32, 1 },
2178 { ISD::SETCC, MVT::v16i16, 1 },
2179 { ISD::SETCC, MVT::v32i8, 1 },
2180
2181 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
2182 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
2183 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
2184 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
2185 };
2186
2187 static const CostTblEntry AVX1CostTbl[] = {
2188 { ISD::SETCC, MVT::v4f64, 1 },
2189 { ISD::SETCC, MVT::v8f32, 1 },
2190 // AVX1 does not support 8-wide integer compare.
2191 { ISD::SETCC, MVT::v4i64, 4 },
2192 { ISD::SETCC, MVT::v8i32, 4 },
2193 { ISD::SETCC, MVT::v16i16, 4 },
2194 { ISD::SETCC, MVT::v32i8, 4 },
2195
2196 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
2197 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
2198 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
2199 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
2200 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
2201 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
2202 };
2203
2204 static const CostTblEntry SSE42CostTbl[] = {
2205 { ISD::SETCC, MVT::v2f64, 1 },
2206 { ISD::SETCC, MVT::v4f32, 1 },
2207 { ISD::SETCC, MVT::v2i64, 1 },
2208 };
2209
2210 static const CostTblEntry SSE41CostTbl[] = {
2211 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
2212 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
2213 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
2214 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
2215 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
2216 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
2217 };
2218
2219 static const CostTblEntry SSE2CostTbl[] = {
2220 { ISD::SETCC, MVT::v2f64, 2 },
2221 { ISD::SETCC, MVT::f64, 1 },
2222 { ISD::SETCC, MVT::v2i64, 8 },
2223 { ISD::SETCC, MVT::v4i32, 1 },
2224 { ISD::SETCC, MVT::v8i16, 1 },
2225 { ISD::SETCC, MVT::v16i8, 1 },
2226
2227 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
2228 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
2229 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
2230 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
2231 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
2232 };
2233
2234 static const CostTblEntry SSE1CostTbl[] = {
2235 { ISD::SETCC, MVT::v4f32, 2 },
2236 { ISD::SETCC, MVT::f32, 1 },
2237
2238 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
2239 };
2240
2241 if (ST->isSLM())
2242 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2243 return LT.first * (ExtraCost + Entry->Cost);
2244
2245 if (ST->hasBWI())
2246 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2247 return LT.first * (ExtraCost + Entry->Cost);
2248
2249 if (ST->hasAVX512())
2250 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2251 return LT.first * (ExtraCost + Entry->Cost);
2252
2253 if (ST->hasAVX2())
2254 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2255 return LT.first * (ExtraCost + Entry->Cost);
2256
2257 if (ST->hasAVX())
2258 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2259 return LT.first * (ExtraCost + Entry->Cost);
2260
2261 if (ST->hasSSE42())
2262 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2263 return LT.first * (ExtraCost + Entry->Cost);
2264
2265 if (ST->hasSSE41())
2266 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2267 return LT.first * (ExtraCost + Entry->Cost);
2268
2269 if (ST->hasSSE2())
2270 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2271 return LT.first * (ExtraCost + Entry->Cost);
2272
2273 if (ST->hasSSE1())
2274 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2275 return LT.first * (ExtraCost + Entry->Cost);
2276
2277 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2278}
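
Putting the pieces of this function together, a worked example under the SSE2-only path: an icmp ne <32 x i8> legalizes to two v16i8 compares (LT.first = 2), the SSE2 SETCC entry for v16i8 costs 1, and the ICMP_NE predicate adds ExtraCost = 1 for the xor-with-all-ones, so the reported cost is 2 * (1 + 1) = 4. This is a hand-derived check against the tables above, not an output quoted from the cost model:

// LT.first * (ExtraCost + Entry->Cost) for icmp ne <32 x i8> on plain SSE2.
static_assert(2 * (1 + 1) == 4, "two pcmpeqb+pxor pairs, one per v16i8 half");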
2279
2280unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2281
2282int X86TTIImpl::getTypeBasedIntrinsicInstrCost(
2283 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) {
2284
2285 // Costs should match the codegen from:
2286 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2287 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2288 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2289 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2290 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2291
2292 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2293 // specialized in these tables yet.
2294 static const CostTblEntry AVX512CDCostTbl[] = {
2295 { ISD::CTLZ, MVT::v8i64, 1 },
2296 { ISD::CTLZ, MVT::v16i32, 1 },
2297 { ISD::CTLZ, MVT::v32i16, 8 },
2298 { ISD::CTLZ, MVT::v64i8, 20 },
2299 { ISD::CTLZ, MVT::v4i64, 1 },
2300 { ISD::CTLZ, MVT::v8i32, 1 },
2301 { ISD::CTLZ, MVT::v16i16, 4 },
2302 { ISD::CTLZ, MVT::v32i8, 10 },
2303 { ISD::CTLZ, MVT::v2i64, 1 },
2304 { ISD::CTLZ, MVT::v4i32, 1 },
2305 { ISD::CTLZ, MVT::v8i16, 4 },
2306 { ISD::CTLZ, MVT::v16i8, 4 },
2307 };
2308 static const CostTblEntry AVX512BWCostTbl[] = {
2309 { ISD::ABS, MVT::v32i16, 1 },
2310 { ISD::ABS, MVT::v64i8, 1 },
2311 { ISD::BITREVERSE, MVT::v8i64, 5 },
2312 { ISD::BITREVERSE, MVT::v16i32, 5 },
2313 { ISD::BITREVERSE, MVT::v32i16, 5 },
2314 { ISD::BITREVERSE, MVT::v64i8, 5 },
2315 { ISD::CTLZ, MVT::v8i64, 23 },
2316 { ISD::CTLZ, MVT::v16i32, 22 },
2317 { ISD::CTLZ, MVT::v32i16, 18 },
2318 { ISD::CTLZ, MVT::v64i8, 17 },
2319 { ISD::CTPOP, MVT::v8i64, 7 },
2320 { ISD::CTPOP, MVT::v16i32, 11 },
2321 { ISD::CTPOP, MVT::v32i16, 9 },
2322 { ISD::CTPOP, MVT::v64i8, 6 },
2323 { ISD::CTTZ, MVT::v8i64, 10 },
2324 { ISD::CTTZ, MVT::v16i32, 14 },
2325 { ISD::CTTZ, MVT::v32i16, 12 },
2326 { ISD::CTTZ, MVT::v64i8, 9 },
2327 { ISD::SADDSAT, MVT::v32i16, 1 },
2328 { ISD::SADDSAT, MVT::v64i8, 1 },
2329 { ISD::SMAX, MVT::v32i16, 1 },
2330 { ISD::SMAX, MVT::v64i8, 1 },
2331 { ISD::SMIN, MVT::v32i16, 1 },
2332 { ISD::SMIN, MVT::v64i8, 1 },
2333 { ISD::SSUBSAT, MVT::v32i16, 1 },
2334 { ISD::SSUBSAT, MVT::v64i8, 1 },
2335 { ISD::UADDSAT, MVT::v32i16, 1 },
2336 { ISD::UADDSAT, MVT::v64i8, 1 },
2337 { ISD::UMAX, MVT::v32i16, 1 },
2338 { ISD::UMAX, MVT::v64i8, 1 },
2339 { ISD::UMIN, MVT::v32i16, 1 },
2340 { ISD::UMIN, MVT::v64i8, 1 },
2341 { ISD::USUBSAT, MVT::v32i16, 1 },
2342 { ISD::USUBSAT, MVT::v64i8, 1 },
2343 };
2344 static const CostTblEntry AVX512CostTbl[] = {
2345 { ISD::ABS, MVT::v8i64, 1 },
2346 { ISD::ABS, MVT::v16i32, 1 },
2347 { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split
2348 { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split
2349 { ISD::ABS, MVT::v4i64, 1 },
2350 { ISD::ABS, MVT::v2i64, 1 },
2351 { ISD::BITREVERSE, MVT::v8i64, 36 },
2352 { ISD::BITREVERSE, MVT::v16i32, 24 },
2353 { ISD::BITREVERSE, MVT::v32i16, 10 },
2354 { ISD::BITREVERSE, MVT::v64i8, 10 },
2355 { ISD::CTLZ, MVT::v8i64, 29 },
2356 { ISD::CTLZ, MVT::v16i32, 35 },
2357 { ISD::CTLZ, MVT::v32i16, 28 },
2358 { ISD::CTLZ, MVT::v64i8, 18 },
2359 { ISD::CTPOP, MVT::v8i64, 16 },
2360 { ISD::CTPOP, MVT::v16i32, 24 },
2361 { ISD::CTPOP, MVT::v32i16, 18 },
2362 { ISD::CTPOP, MVT::v64i8, 12 },
2363 { ISD::CTTZ, MVT::v8i64, 20 },
2364 { ISD::CTTZ, MVT::v16i32, 28 },
2365 { ISD::CTTZ, MVT::v32i16, 24 },
2366 { ISD::CTTZ, MVT::v64i8, 18 },
2367 { ISD::SMAX, MVT::v8i64, 1 },
2368 { ISD::SMAX, MVT::v16i32, 1 },
2369 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split
2370 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split
2371 { ISD::SMAX, MVT::v4i64, 1 },
2372 { ISD::SMAX, MVT::v2i64, 1 },
2373 { ISD::SMIN, MVT::v8i64, 1 },
2374 { ISD::SMIN, MVT::v16i32, 1 },
2375 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split
2376 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split
2377 { ISD::SMIN, MVT::v4i64, 1 },
2378 { ISD::SMIN, MVT::v2i64, 1 },
2379 { ISD::UMAX, MVT::v8i64, 1 },
2380 { ISD::UMAX, MVT::v16i32, 1 },
2381 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split
2382 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split
2383 { ISD::UMAX, MVT::v4i64, 1 },
2384 { ISD::UMAX, MVT::v2i64, 1 },
2385 { ISD::UMIN, MVT::v8i64, 1 },
2386 { ISD::UMIN, MVT::v16i32, 1 },
2387 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split
2388 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split
2389 { ISD::UMIN, MVT::v4i64, 1 },
2390 { ISD::UMIN, MVT::v2i64, 1 },
2391 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
2392 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
2393 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
2394 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
2395 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
2396 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
2397 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
2398 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
2399 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2400 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2401 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2402 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2403 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2404 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2405 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2406 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2407 { ISD::FMAXNUM, MVT::f32, 2 },
2408 { ISD::FMAXNUM, MVT::v4f32, 2 },
2409 { ISD::FMAXNUM, MVT::v8f32, 2 },
2410 { ISD::FMAXNUM, MVT::v16f32, 2 },
2411 { ISD::FMAXNUM, MVT::f64, 2 },
2412 { ISD::FMAXNUM, MVT::v2f64, 2 },
2413 { ISD::FMAXNUM, MVT::v4f64, 2 },
2414 { ISD::FMAXNUM, MVT::v8f64, 2 },
2415 };
2416 static const CostTblEntry XOPCostTbl[] = {
2417 { ISD::BITREVERSE, MVT::v4i64, 4 },
2418 { ISD::BITREVERSE, MVT::v8i32, 4 },
2419 { ISD::BITREVERSE, MVT::v16i16, 4 },
2420 { ISD::BITREVERSE, MVT::v32i8, 4 },
2421 { ISD::BITREVERSE, MVT::v2i64, 1 },
2422 { ISD::BITREVERSE, MVT::v4i32, 1 },
2423 { ISD::BITREVERSE, MVT::v8i16, 1 },
2424 { ISD::BITREVERSE, MVT::v16i8, 1 },
2425 { ISD::BITREVERSE, MVT::i64, 3 },
2426 { ISD::BITREVERSE, MVT::i32, 3 },
2427 { ISD::BITREVERSE, MVT::i16, 3 },
2428 { ISD::BITREVERSE, MVT::i8, 3 }
2429 };
2430 static const CostTblEntry AVX2CostTbl[] = {
2431 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2432 { ISD::ABS, MVT::v8i32, 1 },
2433 { ISD::ABS, MVT::v16i16, 1 },
2434 { ISD::ABS, MVT::v32i8, 1 },
2435 { ISD::BITREVERSE, MVT::v4i64, 5 },
2436 { ISD::BITREVERSE, MVT::v8i32, 5 },
2437 { ISD::BITREVERSE, MVT::v16i16, 5 },
2438 { ISD::BITREVERSE, MVT::v32i8, 5 },
2439 { ISD::BSWAP, MVT::v4i64, 1 },
2440 { ISD::BSWAP, MVT::v8i32, 1 },
2441 { ISD::BSWAP, MVT::v16i16, 1 },
2442 { ISD::CTLZ, MVT::v4i64, 23 },
2443 { ISD::CTLZ, MVT::v8i32, 18 },
2444 { ISD::CTLZ, MVT::v16i16, 14 },
2445 { ISD::CTLZ, MVT::v32i8, 9 },
2446 { ISD::CTPOP, MVT::v4i64, 7 },
2447 { ISD::CTPOP, MVT::v8i32, 11 },
2448 { ISD::CTPOP, MVT::v16i16, 9 },
2449 { ISD::CTPOP, MVT::v32i8, 6 },
2450 { ISD::CTTZ, MVT::v4i64, 10 },
2451 { ISD::CTTZ, MVT::v8i32, 14 },
2452 { ISD::CTTZ, MVT::v16i16, 12 },
2453 { ISD::CTTZ, MVT::v32i8, 9 },
2454 { ISD::SADDSAT, MVT::v16i16, 1 },
2455 { ISD::SADDSAT, MVT::v32i8, 1 },
2456 { ISD::SMAX, MVT::v8i32, 1 },
2457 { ISD::SMAX, MVT::v16i16, 1 },
2458 { ISD::SMAX, MVT::v32i8, 1 },
2459 { ISD::SMIN, MVT::v8i32, 1 },
2460 { ISD::SMIN, MVT::v16i16, 1 },
2461 { ISD::SMIN, MVT::v32i8, 1 },
2462 { ISD::SSUBSAT, MVT::v16i16, 1 },
2463 { ISD::SSUBSAT, MVT::v32i8, 1 },
2464 { ISD::UADDSAT, MVT::v16i16, 1 },
2465 { ISD::UADDSAT, MVT::v32i8, 1 },
2466 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
2467 { ISD::UMAX, MVT::v8i32, 1 },
2468 { ISD::UMAX, MVT::v16i16, 1 },
2469 { ISD::UMAX, MVT::v32i8, 1 },
2470 { ISD::UMIN, MVT::v8i32, 1 },
2471 { ISD::UMIN, MVT::v16i16, 1 },
2472 { ISD::UMIN, MVT::v32i8, 1 },
2473 { ISD::USUBSAT, MVT::v16i16, 1 },
2474 { ISD::USUBSAT, MVT::v32i8, 1 },
2475 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
2476 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2477 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2478 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
2479 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
2480 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
2481 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
2482 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
2483 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
2484 };
2485 static const CostTblEntry AVX1CostTbl[] = {
2486 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2487 { ISD::ABS, MVT::v8i32, 3 },
2488 { ISD::ABS, MVT::v16i16, 3 },
2489 { ISD::ABS, MVT::v32i8, 3 },
2490 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
2491 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
2492 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2493 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
2494 { ISD::BSWAP, MVT::v4i64, 4 },
2495 { ISD::BSWAP, MVT::v8i32, 4 },
2496 { ISD::BSWAP, MVT::v16i16, 4 },
2497 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
2498 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
2499 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2500 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2501 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
2502 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
2503 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2504 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
2505 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
2506 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
2507 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2508 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2509 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2510 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2511 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2512 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2513 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2514 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2515 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2516 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2517 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2518 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2519 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2520 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2521 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
2522 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2523 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2524 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2525 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2526 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2527 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2528 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2529 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2530 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
2531 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2532 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2533 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2534 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2535 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2536 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2537 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
2538 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
2539 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
2540 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
2541 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
2542 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
2543 };
2544 static const CostTblEntry GLMCostTbl[] = {
2545 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
2546 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2547 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
2548 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2549 };
2550 static const CostTblEntry SLMCostTbl[] = {
2551 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
2552 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2553 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
2554 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2555 };
2556 static const CostTblEntry SSE42CostTbl[] = {
2557 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
2558 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
2559 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
2560 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
2561 };
2562 static const CostTblEntry SSE41CostTbl[] = {
2563 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2564 { ISD::SMAX, MVT::v4i32, 1 },
2565 { ISD::SMAX, MVT::v16i8, 1 },
2566 { ISD::SMIN, MVT::v4i32, 1 },
2567 { ISD::SMIN, MVT::v16i8, 1 },
2568 { ISD::UMAX, MVT::v4i32, 1 },
2569 { ISD::UMAX, MVT::v8i16, 1 },
2570 { ISD::UMIN, MVT::v4i32, 1 },
2571 { ISD::UMIN, MVT::v8i16, 1 },
2572 };
2573 static const CostTblEntry SSSE3CostTbl[] = {
2574 { ISD::ABS, MVT::v4i32, 1 },
2575 { ISD::ABS, MVT::v8i16, 1 },
2576 { ISD::ABS, MVT::v16i8, 1 },
2577 { ISD::BITREVERSE, MVT::v2i64, 5 },
2578 { ISD::BITREVERSE, MVT::v4i32, 5 },
2579 { ISD::BITREVERSE, MVT::v8i16, 5 },
2580 { ISD::BITREVERSE, MVT::v16i8, 5 },
2581 { ISD::BSWAP, MVT::v2i64, 1 },
2582 { ISD::BSWAP, MVT::v4i32, 1 },
2583 { ISD::BSWAP, MVT::v8i16, 1 },
2584 { ISD::CTLZ, MVT::v2i64, 23 },
2585 { ISD::CTLZ, MVT::v4i32, 18 },
2586 { ISD::CTLZ, MVT::v8i16, 14 },
2587 { ISD::CTLZ, MVT::v16i8, 9 },
2588 { ISD::CTPOP, MVT::v2i64, 7 },
2589 { ISD::CTPOP, MVT::v4i32, 11 },
2590 { ISD::CTPOP, MVT::v8i16, 9 },
2591 { ISD::CTPOP, MVT::v16i8, 6 },
2592 { ISD::CTTZ, MVT::v2i64, 10 },
2593 { ISD::CTTZ, MVT::v4i32, 14 },
2594 { ISD::CTTZ, MVT::v8i16, 12 },
2595 { ISD::CTTZ, MVT::v16i8, 9 }
2596 };
2597 static const CostTblEntry SSE2CostTbl[] = {
2598 { ISD::ABS, MVT::v2i64, 4 },
2599 { ISD::ABS, MVT::v4i32, 3 },
2600 { ISD::ABS, MVT::v8i16, 2 },
2601 { ISD::ABS, MVT::v16i8, 2 },
2602 { ISD::BITREVERSE, MVT::v2i64, 29 },
2603 { ISD::BITREVERSE, MVT::v4i32, 27 },
2604 { ISD::BITREVERSE, MVT::v8i16, 27 },
2605 { ISD::BITREVERSE, MVT::v16i8, 20 },
2606 { ISD::BSWAP, MVT::v2i64, 7 },
2607 { ISD::BSWAP, MVT::v4i32, 7 },
2608 { ISD::BSWAP, MVT::v8i16, 7 },
2609 { ISD::CTLZ, MVT::v2i64, 25 },
2610 { ISD::CTLZ, MVT::v4i32, 26 },
2611 { ISD::CTLZ, MVT::v8i16, 20 },
2612 { ISD::CTLZ, MVT::v16i8, 17 },
2613 { ISD::CTPOP, MVT::v2i64, 12 },
2614 { ISD::CTPOP, MVT::v4i32, 15 },
2615 { ISD::CTPOP, MVT::v8i16, 13 },
2616 { ISD::CTPOP, MVT::v16i8, 10 },
2617 { ISD::CTTZ, MVT::v2i64, 14 },
2618 { ISD::CTTZ, MVT::v4i32, 18 },
2619 { ISD::CTTZ, MVT::v8i16, 16 },
2620 { ISD::CTTZ, MVT::v16i8, 13 },
2621 { ISD::SADDSAT, MVT::v8i16, 1 },
2622 { ISD::SADDSAT, MVT::v16i8, 1 },
2623 { ISD::SMAX, MVT::v8i16, 1 },
2624 { ISD::SMIN, MVT::v8i16, 1 },
2625 { ISD::SSUBSAT, MVT::v8i16, 1 },
2626 { ISD::SSUBSAT, MVT::v16i8, 1 },
2627 { ISD::UADDSAT, MVT::v8i16, 1 },
2628 { ISD::UADDSAT, MVT::v16i8, 1 },
2629 { ISD::UMAX, MVT::v8i16, 2 },
2630 { ISD::UMAX, MVT::v16i8, 1 },
2631 { ISD::UMIN, MVT::v8i16, 2 },
2632 { ISD::UMIN, MVT::v16i8, 1 },
2633 { ISD::USUBSAT, MVT::v8i16, 1 },
2634 { ISD::USUBSAT, MVT::v16i8, 1 },
2635 { ISD::FMAXNUM, MVT::f64, 4 },
2636 { ISD::FMAXNUM, MVT::v2f64, 4 },
2637 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
2638 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
2639 };
2640 static const CostTblEntry SSE1CostTbl[] = {
2641 { ISD::FMAXNUM, MVT::f32, 4 },
2642 { ISD::FMAXNUM, MVT::v4f32, 4 },
2643 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
2644 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
2645 };
2646 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
2647 { ISD::CTTZ, MVT::i64, 1 },
2648 };
2649 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
2650 { ISD::CTTZ, MVT::i32, 1 },
2651 { ISD::CTTZ, MVT::i16, 1 },
2652 { ISD::CTTZ, MVT::i8, 1 },
2653 };
2654 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
2655 { ISD::CTLZ, MVT::i64, 1 },
2656 };
2657 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
2658 { ISD::CTLZ, MVT::i32, 1 },
2659 { ISD::CTLZ, MVT::i16, 1 },
2660 { ISD::CTLZ, MVT::i8, 1 },
2661 };
2662 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
2663 { ISD::CTPOP, MVT::i64, 1 },
2664 };
2665 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
2666 { ISD::CTPOP, MVT::i32, 1 },
2667 { ISD::CTPOP, MVT::i16, 1 },
2668 { ISD::CTPOP, MVT::i8, 1 },
2669 };
2670 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2671 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV
2672 { ISD::BITREVERSE, MVT::i64, 14 },
2673 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
2674 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
2675 { ISD::CTPOP, MVT::i64, 10 },
2676 { ISD::SADDO, MVT::i64, 1 },
2677 { ISD::UADDO, MVT::i64, 1 },
2678 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto
2679 };
2680 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2681 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV
2682 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV
2683 { ISD::BITREVERSE, MVT::i32, 14 },
2684 { ISD::BITREVERSE, MVT::i16, 14 },
2685 { ISD::BITREVERSE, MVT::i8, 11 },
2686 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
2687 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
2688 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV
2689 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
2690 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
2691 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH
2692 { ISD::CTPOP, MVT::i32, 8 },
2693 { ISD::CTPOP, MVT::i16, 9 },
2694 { ISD::CTPOP, MVT::i8, 7 },
2695 { ISD::SADDO, MVT::i32, 1 },
2696 { ISD::SADDO, MVT::i16, 1 },
2697 { ISD::SADDO, MVT::i8, 1 },
2698 { ISD::UADDO, MVT::i32, 1 },
2699 { ISD::UADDO, MVT::i16, 1 },
2700 { ISD::UADDO, MVT::i8, 1 },
2701 { ISD::UMULO, MVT::i32, 2 }, // mul + seto
2702 { ISD::UMULO, MVT::i16, 2 },
2703 { ISD::UMULO, MVT::i8, 2 },
2704 };
2705
2706 Type *RetTy = ICA.getReturnType();
2707 Type *OpTy = RetTy;
2708 Intrinsic::ID IID = ICA.getID();
2709 unsigned ISD = ISD::DELETED_NODE;
2710 switch (IID) {
2711 default:
2712 break;
2713 case Intrinsic::abs:
2714 ISD = ISD::ABS;
2715 break;
2716 case Intrinsic::bitreverse:
2717 ISD = ISD::BITREVERSE;
2718 break;
2719 case Intrinsic::bswap:
2720 ISD = ISD::BSWAP;
2721 break;
2722 case Intrinsic::ctlz:
2723 ISD = ISD::CTLZ;
2724 break;
2725 case Intrinsic::ctpop:
2726 ISD = ISD::CTPOP;
2727 break;
2728 case Intrinsic::cttz:
2729 ISD = ISD::CTTZ;
2730 break;
2731 case Intrinsic::maxnum:
2732 case Intrinsic::minnum:
2733 // FMINNUM has same costs so don't duplicate.
2734 ISD = ISD::FMAXNUM;
2735 break;
2736 case Intrinsic::sadd_sat:
2737 ISD = ISD::SADDSAT;
2738 break;
2739 case Intrinsic::smax:
2740 ISD = ISD::SMAX;
2741 break;
2742 case Intrinsic::smin:
2743 ISD = ISD::SMIN;
2744 break;
2745 case Intrinsic::ssub_sat:
2746 ISD = ISD::SSUBSAT;
2747 break;
2748 case Intrinsic::uadd_sat:
2749 ISD = ISD::UADDSAT;
2750 break;
2751 case Intrinsic::umax:
2752 ISD = ISD::UMAX;
2753 break;
2754 case Intrinsic::umin:
2755 ISD = ISD::UMIN;
2756 break;
2757 case Intrinsic::usub_sat:
2758 ISD = ISD::USUBSAT;
2759 break;
2760 case Intrinsic::sqrt:
2761 ISD = ISD::FSQRT;
2762 break;
2763 case Intrinsic::sadd_with_overflow:
2764 case Intrinsic::ssub_with_overflow:
2765 // SSUBO has same costs so don't duplicate.
2766 ISD = ISD::SADDO;
2767 OpTy = RetTy->getContainedType(0);
2768 break;
2769 case Intrinsic::uadd_with_overflow:
2770 case Intrinsic::usub_with_overflow:
2771 // USUBO has same costs so don't duplicate.
2772 ISD = ISD::UADDO;
2773 OpTy = RetTy->getContainedType(0);
2774 break;
2775 case Intrinsic::umul_with_overflow:
2776 case Intrinsic::smul_with_overflow:
2777 // SMULO has same costs so don't duplicate.
2778 ISD = ISD::UMULO;
2779 OpTy = RetTy->getContainedType(0);
2780 break;
2781 }
2782
2783 if (ISD != ISD::DELETED_NODE) {
2784 // Legalize the type.
2785 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2786 MVT MTy = LT.second;
2787
2788 // Attempt to lookup cost.
2789 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
2790 MTy.isVector()) {
2791 // With PSHUFB the code is very similar for all types. If we have integer
2792 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
2793 // we also need a PSHUFB.
2794 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
2795
2796 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
2797 // instructions. We also need an extract and an insert.
2798 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
2799 (ST->hasBWI() && MTy.is512BitVector())))
2800 Cost = Cost * 2 + 2;
2801
2802 return LT.first * Cost;
2803 }
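To make the GFNI bitreverse rule just above concrete, here is a minimal standalone sketch of the same arithmetic (hypothetical helper name, not LLVM API):

static unsigned gfniBitreverseCost(bool ByteElements, bool NativeWidth,
                                   unsigned NumLegalizedVectors) {
  // vXi8 needs only a GF2P8AFFINEQB; other element types also need a PSHUFB.
  unsigned Cost = ByteElements ? 1 : 2;
  // Non-native vector widths pay double plus an extract and an insert.
  if (!NativeWidth)
    Cost = Cost * 2 + 2;
  return NumLegalizedVectors * Cost;
}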
2804
2805 if (ST->useGLMDivSqrtCosts())
2806 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2807 return LT.first * Entry->Cost;
2808
2809 if (ST->isSLM())
2810 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2811 return LT.first * Entry->Cost;
2812
2813 if (ST->hasCDI())
2814 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2815 return LT.first * Entry->Cost;
2816
2817 if (ST->hasBWI())
2818 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2819 return LT.first * Entry->Cost;
2820
2821 if (ST->hasAVX512())
2822 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2823 return LT.first * Entry->Cost;
2824
2825 if (ST->hasXOP())
2826 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2827 return LT.first * Entry->Cost;
2828
2829 if (ST->hasAVX2())
2830 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2831 return LT.first * Entry->Cost;
2832
2833 if (ST->hasAVX())
2834 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2835 return LT.first * Entry->Cost;
2836
2837 if (ST->hasSSE42())
2838 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2839 return LT.first * Entry->Cost;
2840
2841 if (ST->hasSSE41())
2842 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2843 return LT.first * Entry->Cost;
2844
2845 if (ST->hasSSSE3())
2846 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2847 return LT.first * Entry->Cost;
2848
2849 if (ST->hasSSE2())
2850 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2851 return LT.first * Entry->Cost;
2852
2853 if (ST->hasSSE1())
2854 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2855 return LT.first * Entry->Cost;
2856
2857 if (ST->hasBMI()) {
2858 if (ST->is64Bit())
2859 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
2860 return LT.first * Entry->Cost;
2861
2862 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
2863 return LT.first * Entry->Cost;
2864 }
2865
2866 if (ST->hasLZCNT()) {
2867 if (ST->is64Bit())
2868 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
2869 return LT.first * Entry->Cost;
2870
2871 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
2872 return LT.first * Entry->Cost;
2873 }
2874
2875 if (ST->hasPOPCNT()) {
2876 if (ST->is64Bit())
2877 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
2878 return LT.first * Entry->Cost;
2879
2880 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
2881 return LT.first * Entry->Cost;
2882 }
2883
2884 // TODO - add BMI (TZCNT) scalar handling
2885
2886 if (ST->is64Bit())
2887 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2888 return LT.first * Entry->Cost;
2889
2890 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2891 return LT.first * Entry->Cost;
2892 }
2893
2894 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2895}
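The chain of CostTableLookup calls above runs from the most specific subtarget feature down to the most generic table, so the first hit wins. A minimal sketch of that lookup pattern, using hypothetical types rather than LLVM's CostTblEntry API:

#include <cstddef>

struct CostEntry { int ISD; int Type; int Cost; };

template <std::size_t N>
static const CostEntry *lookupCost(const CostEntry (&Table)[N], int ISD,
                                   int Type) {
  for (const CostEntry &E : Table)
    if (E.ISD == ISD && E.Type == Type)
      return &E;   // first matching entry wins
  return nullptr;  // caller falls through to the next, more generic table
}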
2896
2897int X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2898 TTI::TargetCostKind CostKind) {
2899 if (ICA.isTypeBasedOnly())
2900 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2901
2902 static const CostTblEntry AVX512CostTbl[] = {
2903 { ISD::ROTL, MVT::v8i64, 1 },
2904 { ISD::ROTL, MVT::v4i64, 1 },
2905 { ISD::ROTL, MVT::v2i64, 1 },
2906 { ISD::ROTL, MVT::v16i32, 1 },
2907 { ISD::ROTL, MVT::v8i32, 1 },
2908 { ISD::ROTL, MVT::v4i32, 1 },
2909 { ISD::ROTR, MVT::v8i64, 1 },
2910 { ISD::ROTR, MVT::v4i64, 1 },
2911 { ISD::ROTR, MVT::v2i64, 1 },
2912 { ISD::ROTR, MVT::v16i32, 1 },
2913 { ISD::ROTR, MVT::v8i32, 1 },
2914 { ISD::ROTR, MVT::v4i32, 1 }
2915 };
2916 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
2917 static const CostTblEntry XOPCostTbl[] = {
2918 { ISD::ROTL, MVT::v4i64, 4 },
2919 { ISD::ROTL, MVT::v8i32, 4 },
2920 { ISD::ROTL, MVT::v16i16, 4 },
2921 { ISD::ROTL, MVT::v32i8, 4 },
2922 { ISD::ROTL, MVT::v2i64, 1 },
2923 { ISD::ROTL, MVT::v4i32, 1 },
2924 { ISD::ROTL, MVT::v8i16, 1 },
2925 { ISD::ROTL, MVT::v16i8, 1 },
2926 { ISD::ROTR, MVT::v4i64, 6 },
2927 { ISD::ROTR, MVT::v8i32, 6 },
2928 { ISD::ROTR, MVT::v16i16, 6 },
2929 { ISD::ROTR, MVT::v32i8, 6 },
2930 { ISD::ROTR, MVT::v2i64, 2 },
2931 { ISD::ROTR, MVT::v4i32, 2 },
2932 { ISD::ROTR, MVT::v8i16, 2 },
2933 { ISD::ROTR, MVT::v16i8, 2 }
2934 };
2935 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2936 { ISD::ROTL, MVT::i64, 1 },
2937 { ISD::ROTR, MVT::i64, 1 },
2938 { ISD::FSHL, MVT::i64, 4 }
2939 };
2940 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2941 { ISD::ROTL, MVT::i32, 1 },
2942 { ISD::ROTL, MVT::i16, 1 },
2943 { ISD::ROTL, MVT::i8, 1 },
2944 { ISD::ROTR, MVT::i32, 1 },
2945 { ISD::ROTR, MVT::i16, 1 },
2946 { ISD::ROTR, MVT::i8, 1 },
2947 { ISD::FSHL, MVT::i32, 4 },
2948 { ISD::FSHL, MVT::i16, 4 },
2949 { ISD::FSHL, MVT::i8, 4 }
2950 };
2951
2952 Intrinsic::ID IID = ICA.getID();
2953 Type *RetTy = ICA.getReturnType();
2954 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
2955 unsigned ISD = ISD::DELETED_NODE;
2956 switch (IID) {
2957 default:
2958 break;
2959 case Intrinsic::fshl:
2960 ISD = ISD::FSHL;
2961 if (Args[0] == Args[1])
2962 ISD = ISD::ROTL;
2963 break;
2964 case Intrinsic::fshr:
2965 // FSHR has same costs so don't duplicate.
2966 ISD = ISD::FSHL;
2967 if (Args[0] == Args[1])
2968 ISD = ISD::ROTR;
2969 break;
2970 }
2971
2972 if (ISD != ISD::DELETED_NODE) {
2973 // Legalize the type.
2974 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
2975 MVT MTy = LT.second;
2976
2977 // Attempt to lookup cost.
2978 if (ST->hasAVX512())
2979 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2980 return LT.first * Entry->Cost;
2981
2982 if (ST->hasXOP())
2983 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2984 return LT.first * Entry->Cost;
2985
2986 if (ST->is64Bit())
2987 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2988 return LT.first * Entry->Cost;
2989
2990 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2991 return LT.first * Entry->Cost;
2992 }
2993
2994 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2995}
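For context on the Args[0] == Args[1] checks in the switch above: a funnel shift whose two data operands are the same value is simply a rotate, which is why the tables price ROTL/ROTR separately from FSHL. A small standalone C++ illustration (not LLVM code):

#include <cstdint>

// fshl(hi, lo, s) shifts the 64-bit concatenation hi:lo left by s (mod 32)
// and returns the high 32 bits.
static uint32_t fshl32(uint32_t Hi, uint32_t Lo, uint32_t Amt) {
  Amt &= 31;
  return Amt ? (Hi << Amt) | (Lo >> (32 - Amt)) : Hi;
}

// With both data operands equal, the funnel shift degenerates to a rotate.
static uint32_t rotl32(uint32_t X, uint32_t Amt) { return fshl32(X, X, Amt); }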
2996
2997int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
2998 static const CostTblEntry SLMCostTbl[] = {
2999 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
3000 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
3001 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
3002 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
3003 };
3004
3005 assert(Val->isVectorTy() && "This must be a vector type");

3006 Type *ScalarType = Val->getScalarType();
3007 int RegisterFileMoveCost = 0;
3008
3009 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3010 Opcode == Instruction::InsertElement)) {
3011 // Legalize the type.
3012 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3013
3014 // This type is legalized to a scalar type.
3015 if (!LT.second.isVector())
3016 return 0;
3017
3018 // The type may be split. Normalize the index to the new type.
3019 unsigned NumElts = LT.second.getVectorNumElements();
3020 unsigned SubNumElts = NumElts;
3021 Index = Index % NumElts;
3022
3023 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3024 // For inserts, we also need to insert the subvector back.
3025 if (LT.second.getSizeInBits() > 128) {
3026 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
3027 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3028 SubNumElts = NumElts / NumSubVecs;
3029 if (SubNumElts <= Index) {
3030 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3031 Index %= SubNumElts;
3032 }
3033 }
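A minimal standalone sketch of the index normalization performed just above (hypothetical helper, not LLVM API). For example, accessing element 5 of a legal 256-bit v8i32 rebases the index to 1 and, for an insert, adds 2 register-file moves:

static void normalizeLaneIndex(unsigned &Index, unsigned NumElts,
                               unsigned VectorBits, bool IsInsert,
                               int &RegisterFileMoveCost) {
  // Wrap the index into the legalized vector, as the code above does.
  Index %= NumElts;
  if (VectorBits > 128) {
    unsigned SubNumElts = NumElts / (VectorBits / 128);
    if (Index >= SubNumElts) {
      // Extract the high 128-bit subvector (and re-insert it for insertions).
      RegisterFileMoveCost += IsInsert ? 2 : 1;
      Index %= SubNumElts;
    }
  }
}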
3034
3035 if (Index == 0) {
3036 // Floating point scalars are already located in index #0.
3037 // Many insertions into #0 can fold away for scalar fp-ops, so assume
3038 // that holds for all of them.
3039 if (ScalarType->isFloatingPointTy())
3040 return RegisterFileMoveCost;
3041
3042 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3043 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3044 return 1 + RegisterFileMoveCost;
3045 }
3046
3047 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3048 assert(ISD && "Unexpected vector opcode");
3049 MVT MScalarTy = LT.second.getScalarType();
3050 if (ST->isSLM())
3051 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3052 return Entry->Cost + RegisterFileMoveCost;
3053
3054 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3055 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3056 (MScalarTy.isInteger() && ST->hasSSE41()))
3057 return 1 + RegisterFileMoveCost;
3058
3059 // Assume insertps is relatively cheap on all targets.
3060 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3061 Opcode == Instruction::InsertElement)
3062 return 1 + RegisterFileMoveCost;
3063
3064 // For extractions we just need to shuffle the element to index 0, which
3065 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3066 // the element to its destination. In both cases we must handle the
3067 // subvector move(s).
3068 // If the vector type is already less than 128 bits then don't reduce it.
3069 // TODO: Under what circumstances should we shuffle using the full width?
3070 int ShuffleCost = 1;
3071 if (Opcode == Instruction::InsertElement) {
3072 auto *SubTy = cast<VectorType>(Val);
3073 EVT VT = TLI->getValueType(DL, Val);
3074 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3075 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3076 ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, 0, SubTy);
3077 }
3078 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3079 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3080 }
3081
3082 // Add to the base cost if we know that the extracted element of a vector is
3083 // destined to be moved to and used in the integer register file.
3084 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3085 RegisterFileMoveCost += 1;
3086
3087 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3088}
3089
3090unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3091 const APInt &DemandedElts,
3092 bool Insert, bool Extract) {
3093 unsigned Cost = 0;
3094
3095 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much
3096 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3097 if (Insert) {
3098 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3099 MVT MScalarTy = LT.second.getScalarType();
3100
3101 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3102 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3103 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3104 // For types we can insert directly, insertion into 128-bit subvectors is
3105 // cheap, followed by a cheap chain of concatenations.
3106 if (LT.second.getSizeInBits() <= 128) {
3107 Cost +=
3108 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3109 } else {
3110 // For each 128-bit lane: if at least one index is demanded but not all of
3111 // them, and this lane is not the first 128-bit lane of the legalized
3112 // vector, then the lane needs an extracti128. If the lane has at least one
3113 // demanded index at all, it also needs an inserti128.
3114
3115
3116 // The following cases help build intuition. Assume we insert several
3117 // elements into a v8i32 vector on AVX2:
3118 // Case #1: inserting into the 1st index needs vpinsrd + inserti128.
3119 // Case #2: inserting into the 5th index needs extracti128 + vpinsrd +
3120 //          inserti128.
3121 // Case #3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
3122 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * LT.first;
3123 unsigned NumElts = LT.second.getVectorNumElements() * LT.first;
3124 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3125 unsigned Scale = NumElts / Num128Lanes;
3126 // We iterate each 128-lane, and check if we need a
3127 // extracti128/inserti128 for this 128-lane.
3128 for (unsigned I = 0; I < NumElts; I += Scale) {
3129 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3130 APInt MaskedDE = Mask & WidenedDemandedElts;
3131 unsigned Population = MaskedDE.countPopulation();
3132 Cost += (Population > 0 && Population != Scale &&
3133 I % LT.second.getVectorNumElements() != 0);
3134 Cost += Population > 0;
3135 }
3136 Cost += DemandedElts.countPopulation();
3137
3138 // For vXf32 cases, insertion into the 0th index in each v4f32
3139 // 128-bit vector is free.
3140 // NOTE: This assumes legalization widens vXf32 vectors.
3141 if (MScalarTy == MVT::f32)
3142 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3143 i < e; i += 4)
3144 if (DemandedElts[i])
3145 Cost--;
3146 }
3147 } else if (LT.second.isVector()) {
3148 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3149 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3150 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3151 // considered cheap.
3152 if (Ty->isIntOrIntVectorTy())
3153 Cost += DemandedElts.countPopulation();
3154
3155 // Get the smaller of the legalized or original pow2-extended number of
3156 // vector elements, which represents the number of unpacks we'll end up
3157 // performing.
3158 unsigned NumElts = LT.second.getVectorNumElements();
3159 unsigned Pow2Elts =
3160 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3161 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3162 }
3163 }
3164
3165 // TODO: Use default extraction for now, but we should investigate extending this
3166 // to handle repeated subvector extraction.
3167 if (Extract)
3168 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3169
3170 return Cost;
3171}
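The insert path of this function counts one inserti128 per demanded 128-bit lane and one extracti128 per partially demanded lane that is not the first lane of a legalized vector. A standalone sketch of that per-lane accounting (illustrative only; the real code works on APInt demanded-element masks):

#include <vector>

static unsigned laneFixupCost(const std::vector<bool> &Demanded,
                              unsigned EltsPerLane, unsigned EltsPerLegalVec) {
  unsigned Cost = 0;
  for (unsigned I = 0; I < Demanded.size(); I += EltsPerLane) {
    unsigned Population = 0;
    for (unsigned J = I; J < I + EltsPerLane && J < Demanded.size(); ++J)
      Population += Demanded[J];
    // extracti128: lane partially demanded and not the first lane of a
    // legalized vector.
    Cost += (Population > 0 && Population != EltsPerLane &&
             I % EltsPerLegalVec != 0);
    // inserti128: lane has at least one demanded element.
    Cost += Population > 0;
  }
  return Cost;
}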
3172
3173int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3174 MaybeAlign Alignment, unsigned AddressSpace,
3175 TTI::TargetCostKind CostKind,
3176 const Instruction *I) {
3177 // TODO: Handle other cost kinds.
3178 if (CostKind != TTI::TCK_RecipThroughput) {
 20. Assuming 'CostKind' is not equal to TCK_RecipThroughput
 21. Taking true branch
3179 if (isa_and_nonnull<StoreInst>(I)) {
 22. Assuming 'I' is a 'StoreInst'
 23. Taking true branch
3180 Value *Ptr = I->getOperand(1);
 24. Called C++ object pointer is null
3181 // Store instruction with index and scale costs 2 Uops.
3182 // Check the preceding GEP to identify non-const indices.
3183 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
3184 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3185 return TTI::TCC_Basic * 2;
3186 }
3187 }
3188 return TTI::TCC_Basic;
3189 }
3190
3191 // Handle non-power-of-two vectors such as <3 x float>
3192 if (auto *VTy = dyn_cast<FixedVectorType>(Src)) {
3193 unsigned NumElem = VTy->getNumElements();
3194
3195 // Handle a few common cases:
3196 // <3 x float>
3197 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
3198 // Cost = 64 bit store + extract + 32 bit store.
3199 return 3;
3200
3201 // <3 x double>
3202 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
3203 // Cost = 128 bit store + unpack + 64 bit store.
3204 return 3;
3205
3206 // Assume that all other non-power-of-two numbers are scalarized.
3207 if (!isPowerOf2_32(NumElem)) {
3208 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3209 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
3210 AddressSpace, CostKind);
3211 int SplitCost = getScalarizationOverhead(VTy, DemandedElts,
3212 Opcode == Instruction::Load,
3213 Opcode == Instruction::Store);
3214 return NumElem * Cost + SplitCost;
3215 }
3216 }
3217
3218 // Type legalization can't handle structs
3219 if (TLI->getValueType(DL, Src, true) == MVT::Other)
3220 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3221 CostKind);
3222
3223 // Legalize the type.
3224 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3225 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3226        "Invalid Opcode");
3227
3228 // Each load/store unit costs 1.
3229 int Cost = LT.first * 1;
3230
3231 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
3232 // proxy for a double-pumped AVX memory interface such as on Sandybridge.
3233 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
3234 Cost *= 2;
3235
3236 return Cost;
3237}
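Summarizing the legal-type tail of this function as a standalone sketch (hypothetical helper, not LLVM API):

// Each legalized load/store unit costs 1; a 32-byte access is doubled on
// subtargets where unaligned 32-byte memory operations are slow, as a proxy
// for a double-pumped AVX memory interface (e.g. Sandy Bridge).
static int legalMemoryOpCost(int NumLegalizedOps, unsigned StoreSizeBytes,
                             bool UnalignedMem32Slow) {
  int Cost = NumLegalizedOps;
  if (StoreSizeBytes == 32 && UnalignedMem32Slow)
    Cost *= 2;
  return Cost;
}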
3238
3239int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
3240 Align Alignment, unsigned AddressSpace,
3241 TTI::TargetCostKind CostKind) {
3242 bool IsLoad = (Instruction::Load == Opcode);
3243 bool IsStore = (Instruction::Store == Opcode);
3244
3245 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3246 if (!SrcVTy)
3247 // For a scalar, take the regular cost without the mask.
3248 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3249
3250 unsigned NumElem = SrcVTy->getNumElements();
3251 auto *MaskTy =
3252 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3253 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3254 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) ||
3255 !isPowerOf2_32(NumElem)) {
3256 // Scalarization
3257 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3258 int MaskSplitCost =
3259 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3260 int ScalarCompareCost = getCmpSelInstrCost(
3261 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3262 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3263 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3264 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3265 int ValueSplitCost =
3266 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3267 int MemopCost =
3268 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3269 Alignment, AddressSpace, CostKind);
3270 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3271 }
3272
3273 // Legalize the type.
3274 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3275 auto VT = TLI->getValueType(DL, SrcVTy);
3276 int Cost = 0;
3277 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3278 LT.second.getVectorNumElements() == NumElem)
3279 // Promotion requires expand/truncate for data and a shuffle for mask.
3280 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
3281 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);
3282
3283 else if (LT.second.getVectorNumElements() > NumElem) {
3284 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3285 LT.second.getVectorNumElements());
3286 // Expanding requires filling the mask with zeroes.
3287 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
3288 }
3289
3290 // Pre-AVX512: each maskmov load costs ~2 and each maskmov store costs ~8.
3291 if (!ST->hasAVX512())
3292 return Cost + LT.first * (IsLoad ? 2 : 8);
3293
3294 // AVX-512 masked load/store is cheaper.
3295 return Cost + LT.first;
3296}
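The tail of this function in plain form (illustrative sketch, not LLVM API): pre-AVX512 masked moves are priced per legalized vector at roughly 2 for loads and 8 for stores, while AVX-512 masked memory ops cost about one plain memop each:

static int legalMaskedMemopCost(int PromotionOrExpandCost,
                                int NumLegalizedVecs, bool IsLoad,
                                bool HasAVX512) {
  if (HasAVX512)
    return PromotionOrExpandCost + NumLegalizedVecs; // cheap masked memop
  return PromotionOrExpandCost + NumLegalizedVecs * (IsLoad ? 2 : 8);
}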
3297
3298int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
3299 const SCEV *Ptr) {
3300 // Address computations in vectorized code with non-consecutive addresses will
3301 // likely result in more instructions compared to scalar code where the
3302 // computation can more often be merged into the index mode. The resulting
3303 // extra micro-ops can significantly decrease throughput.
3304 const unsigned NumVectorInstToHideOverhead = 10;
3305
3306 // Cost modeling of strided access computation is hidden by the indexing
3307 // modes of X86 regardless of the stride value. We don't believe that there
3308 // is a difference between constant strided access in general and a constant
3309 // stride value which is less than or equal to 64.
3310 // Even in the case of a (loop-invariant) stride whose value is not known at
3311 // compile time, the address computation will not incur more than one extra
3312 // ADD instruction.
3313 if (Ty->isVectorTy() && SE) {
3314 if (!BaseT::isStridedAccess(Ptr))
3315 return NumVectorInstToHideOverhead;
3316 if (!BaseT::getConstantStrideStep(SE, Ptr))
3317 return 1;
3318 }
3319
3320 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3321}
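The same heuristic written out as a standalone sketch (hypothetical helper, not LLVM API):

static int addressComputationCost(bool IsVectorTy, bool HaveSCEV,
                                  bool IsStridedAccess,
                                  bool StrideIsCompileTimeConstant,
                                  int BaseCost) {
  if (IsVectorTy && HaveSCEV) {
    if (!IsStridedAccess)
      return 10; // NumVectorInstToHideOverhead
    if (!StrideIsCompileTimeConstant)
      return 1;  // one extra ADD for a loop-invariant, unknown stride
  }
  return BaseCost; // otherwise defer to the base implementation
}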
3322
3323int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3324 bool IsPairwise,
3325 TTI::TargetCostKind CostKind) {
3326 // Just use the default implementation for pair reductions.
3327 if (IsPairwise)
3328 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind);
3329
3330 // We use the Intel Architecture Code Analyzer (IACA) to measure the
3331 // throughput and use that as the cost.
3332
3333 static const CostTblEntry SLMCostTblNoPairWise[] = {
3334 { ISD::FADD, MVT::v2f64, 3 },
3335 { ISD::ADD, MVT::v2i64, 5 },
3336 };
3337
3338 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3339 { ISD::FADD, MVT::v2f64, 2 },
3340 { ISD::FADD, MVT::v4f32, 4 },
3341 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
3342 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
3343 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
3344 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
3345 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
3346 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
3347 { ISD::ADD, MVT::v2i8, 2 },
3348 { ISD::ADD, MVT::v4i8, 2 },
3349 { ISD::ADD, MVT::v8i8, 2 },
3350 { ISD::ADD, MVT::v16i8, 3 },
3351 };
3352
3353 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3354 { ISD::FADD, MVT::v4f64, 3 },
3355 { ISD::FADD, MVT::v4f32, 3 },
3356 { ISD::FADD, MVT::v8f32, 4 },
3357 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
3358 { ISD::ADD, MVT::v4i64, 3 },
3359 { ISD::ADD, MVT::v8i32, 5 },
3360 { ISD::ADD, MVT::v16i16, 5 },
3361 { ISD::ADD, MVT::v32i8, 4 },
3362 };
3363
3364 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3365 assert(ISD && "Invalid opcode");
3366
3367 // Before legalizing the type, give a chance to look up illegal narrow types
3368 // in the table.
3369 // FIXME: Is there a better way to do this?
3370 EVT VT = TLI->getValueType(DL, ValTy);
3371 if (VT.isSimple()) {
3372 MVT MTy = VT.getSimpleVT();
3373 if (ST->isSLM())
3374 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3375 return Entry->Cost;
3376
3377 if (ST->hasAVX())
3378 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3379 return Entry->Cost;
3380
3381 if (ST->hasSSE2())
3382 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3383 return Entry->Cost;
3384 }
3385
3386 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3387
3388 MVT MTy = LT.second;
3389
3390 auto *ValVTy = cast<FixedVectorType>(ValTy);
3391
3392 unsigned ArithmeticCost = 0;
3393 if (LT.first != 1 && MTy.isVector() &&
3394 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3395 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3396 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3397 MTy.getVectorNumElements());
3398 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3399 ArithmeticCost *= LT.first - 1;
3400 }
3401
3402 if (ST->isSLM())
3403 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3404 return ArithmeticCost + Entry->Cost;
3405
3406 if (ST->hasAVX())
3407 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3408 return ArithmeticCost + Entry->Cost;
3409
3410 if (ST->hasSSE2())
3411 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3412 return ArithmeticCost + Entry->Cost;
3413
3414 // FIXME: These assume a naive kshift+binop lowering, which is probably
3415 // conservative in most cases.
3416 static const CostTblEntry AVX512BoolReduction[] = {
3417 { ISD::AND, MVT::v2i1, 3 },
3418 { ISD::AND, MVT::v4i1, 5 },
3419 { ISD::AND, MVT::v8i1, 7 },
3420 { ISD::AND, MVT::v16i1, 9 },
3421 { ISD::AND, MVT::v32i1, 11 },
3422 { ISD::AND, MVT::v64i1, 13 },
3423 { ISD::OR, MVT::v2i1, 3 },
3424 { ISD::OR, MVT::v4i1, 5 },
3425 { ISD::OR, MVT::v8i1, 7 },
3426 { ISD::OR, MVT::v16i1, 9 },
3427 { ISD::OR, MVT::v32i1, 11 },
3428 { ISD::OR, MVT::v64i1, 13 },
3429 };
3430
3431 static const CostTblEntry AVX2BoolReduction[] = {
3432 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
3433 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
3434 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
3435 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
3436 };
3437
3438 static const CostTblEntry AVX1BoolReduction[] = {
3439 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
3440 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
3441 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3442 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3443 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
3444 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
3445 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3446 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3447 };
3448
3449 static const CostTblEntry SSE2BoolReduction[] = {
3450 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
3451 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
3452 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
3453 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
3454 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
3455 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
3456 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
3457 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
3458 };
3459
3460 // Handle bool allof/anyof patterns.
3461 if (ValVTy->getElementType()->isIntegerTy(1)) {
3462 unsigned ArithmeticCost = 0;
3463 if (LT.first != 1 && MTy.isVector() &&
3464 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3465 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3466 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3467 MTy.getVectorNumElements());
3468 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3469 ArithmeticCost *= LT.first - 1;
3470 }
3471
3472 if (ST->hasAVX512())
3473 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3474 return ArithmeticCost + Entry->Cost;
3475 if (ST->hasAVX2())
3476 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3477 return ArithmeticCost + Entry->Cost;
3478 if (ST->hasAVX())
3479 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3480 return ArithmeticCost + Entry->Cost;
3481 if (ST->hasSSE2())
3482 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3483 return ArithmeticCost + Entry->Cost;
3484
3485 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3486 CostKind);
3487 }
3488
3489 unsigned NumVecElts = ValVTy->getNumElements();
3490 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3491
3492 // Special case power of 2 reductions where the scalar type isn't changed
3493 // by type legalization.
3494 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3495 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3496 CostKind);
3497
3498 unsigned ReductionCost = 0;
3499
3500 auto *Ty = ValVTy;
3501 if (LT.first != 1 && MTy.isVector() &&
3502 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3503 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3504 Ty = FixedVectorType::get(ValVTy->getElementType(),
3505 MTy.getVectorNumElements());
3506 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
3507 ReductionCost *= LT.first - 1;
3508 NumVecElts = MTy.getVectorNumElements();
3509 }
3510
3511 // Now handle reduction with the legal type, taking into account size changes
3512 // at each level.
3513 while (NumVecElts > 1) {
3514 // Determine the size of the remaining vector we need to reduce.
3515 unsigned Size = NumVecElts * ScalarSize;
3516 NumVecElts /= 2;
3517 // If we're reducing from 256/512 bits, use an extract_subvector.
3518 if (Size > 128) {
3519 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3520 ReductionCost +=
3521 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3522 Ty = SubTy;
3523 } else if (Size == 128) {
3524 // Reducing from 128 bits is a permute of v2f64/v2i64.
3525 FixedVectorType *ShufTy;
3526 if (ValVTy->isFloatingPointTy())
3527 ShufTy =
3528 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
3529 else
3530 ShufTy =
3531 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
3532 ReductionCost +=
3533 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3534 } else if (Size == 64) {
3535 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3536 FixedVectorType *ShufTy;
3537 if (ValVTy->isFloatingPointTy())
3538 ShufTy =
3539 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
3540 else
3541 ShufTy =
3542 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
3543 ReductionCost +=
3544 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3545 } else {
3546 // Reducing from smaller size is a shift by immediate.
3547 auto *ShiftTy = FixedVectorType::get(
3548 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
3549 ReductionCost += getArithmeticInstrCost(
3550 Instruction::LShr, ShiftTy, CostKind,
3551 TargetTransformInfo::OK_AnyValue,
3552 TargetTransformInfo::OK_UniformConstantValue,
3553 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3554 }
3555
3556 // Add the arithmetic op for this level.
3557 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
3558 }
3559
3560 // Add the final extract element to the cost.
3561 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3562}
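The halving loop above emits one shuffle (or extract_subvector) plus one vector arithmetic op per level until a single element remains, then a final extractelement. A sketch that merely counts those operations; the real code prices each one through the shuffle and arithmetic cost tables:

static unsigned treeReductionOps(unsigned NumVecElts) {
  unsigned Ops = 0;
  while (NumVecElts > 1) {
    NumVecElts /= 2;
    Ops += 2; // one shuffle/extract_subvector + one arithmetic op per level
  }
  return Ops + 1; // final extractelement of lane 0
}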
3563
3564int X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned) {
3565 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3566
3567 MVT MTy = LT.second;
3568
3569 int ISD;
3570 if (Ty->isIntOrIntVectorTy()) {
3571 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3572 } else {
3573 assert(Ty->isFPOrFPVectorTy() &&
3574        "Expected floating point or integer vector type.");
3575 ISD = ISD::FMINNUM;
3576 }
3577
3578 static const CostTblEntry SSE1CostTbl[] = {
3579 {ISD::FMINNUM, MVT::v4f32, 1},
3580 };
3581
3582 static const CostTblEntry SSE2CostTbl[] = {
3583 {ISD::FMINNUM, MVT::v2f64, 1},
3584 {ISD::SMIN, MVT::v8i16, 1},
3585 {ISD::UMIN, MVT::v16i8, 1},
3586 };
3587
3588 static const CostTblEntry SSE41CostTbl[] = {
3589 {ISD::SMIN, MVT::v4i32, 1},
3590 {ISD::UMIN, MVT::v4i32, 1},
3591 {ISD::UMIN, MVT::v8i16, 1},
3592 {ISD::SMIN, MVT::v16i8, 1},
3593 };
3594
3595 static const CostTblEntry SSE42CostTbl[] = {
3596 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
3597 };
3598
3599 static const CostTblEntry AVX1CostTbl[] = {
3600 {ISD::FMINNUM, MVT::v8f32, 1},
3601 {ISD::FMINNUM, MVT::v4f64, 1},
3602 {ISD::SMIN, MVT::v8i32, 3},
3603 {ISD::UMIN, MVT::v8i32, 3},
3604 {ISD::SMIN, MVT::v16i16, 3},
3605 {ISD::UMIN, MVT::v16i16, 3},
3606 {ISD::SMIN, MVT::v32i8, 3},
3607 {ISD::UMIN, MVT::v32i8, 3},
3608 };
3609
3610 static const CostTblEntry AVX2CostTbl[] = {
3611 {ISD::SMIN, MVT::v8i32, 1},
3612 {ISD::UMIN, MVT::v8i32, 1},
3613 {ISD::SMIN, MVT::v16i16, 1},
3614 {ISD::UMIN, MVT::v16i16, 1},
3615 {ISD::SMIN, MVT::v32i8, 1},
3616 {ISD::UMIN, MVT::v32i8, 1},
3617 };
3618
3619 static const CostTblEntry AVX512CostTbl[] = {
3620 {ISD::FMINNUM, MVT::v16f32, 1},
3621 {ISD::FMINNUM, MVT::v8f64, 1},
3622 {ISD::SMIN, MVT::v2i64, 1},
3623 {ISD::UMIN, MVT::v2i64, 1},
3624 {ISD::SMIN, MVT::v4i64, 1},
3625 {ISD::UMIN, MVT::v4i64, 1},
3626 {ISD::SMIN, MVT::v8i64, 1},
3627 {ISD::UMIN, MVT::v8i64, 1},
3628 {ISD::SMIN, MVT::v16i32, 1},
3629 {ISD::UMIN, MVT::v16i32, 1},
3630 };
3631
3632 static const CostTblEntry AVX512BWCostTbl[] = {
3633 {ISD::SMIN, MVT::v32i16, 1},
3634 {ISD::UMIN, MVT::v32i16, 1},
3635 {ISD::SMIN, MVT::v64i8, 1},
3636 {ISD::UMIN, MVT::v64i8, 1},
3637 };
3638
3639 // If we have a native MIN/MAX instruction for this type, use it.
3640 if (ST->hasBWI())
3641 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3642 return LT.first * Entry->Cost;
3643
3644 if (ST->hasAVX512())
3645 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3646 return LT.first * Entry->Cost;
3647
3648 if (ST->hasAVX2())
3649 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3650 return LT.first * Entry->Cost;
3651
3652 if (ST->hasAVX())
3653 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3654 return LT.first * Entry->Cost;
3655
3656 if (ST->hasSSE42())
3657 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3658 return LT.first * Entry->Cost;
3659
3660 if (ST->hasSSE41())
3661 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3662 return LT.first * Entry->Cost;
3663
3664 if (ST->hasSSE2())
3665 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3666 return LT.first * Entry->Cost;
3667
3668 if (ST->hasSSE1())
3669 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3670 return LT.first * Entry->Cost;
3671
3672 unsigned CmpOpcode;
3673 if (Ty->isFPOrFPVectorTy()) {
3674 CmpOpcode = Instruction::FCmp;
3675 } else {
3676 assert(Ty->isIntOrIntVectorTy() &&
3677        "expecting floating point or integer type for min/max reduction");
3678 CmpOpcode = Instruction::ICmp;
3679 }
3680
3681 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3682 // Otherwise fall back to cmp+select.
3683 return getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
3684 CostKind) +
3685 getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
3686 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3687}
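The cmp+select fallback at the end of this function prices a min/max lowered as a compare followed by a select; a scalar C++ equivalent of the shape being costed (illustrative only):

static int smin(int A, int B) {
  bool Less = A < B;   // priced via getCmpSelInstrCost(Instruction::ICmp, ...)
  return Less ? A : B; // priced via getCmpSelInstrCost(Instruction::Select, ...)
}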
3688
3689int X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
3690 bool IsPairwise, bool IsUnsigned,
3691 TTI::TargetCostKind CostKind) {
3692 // Just use the default implementation for pair reductions.
3693 if (IsPairwise)
3694 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3695 CostKind);
3696
3697 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3698
3699 MVT MTy = LT.second;
3700
3701 int ISD;
3702 if (ValTy->isIntOrIntVectorTy()) {
3703 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3704 } else {
3705 assert(ValTy->isFPOrFPVectorTy() &&
3706        "Expected floating point or integer vector type.");
3707 ISD = ISD::FMINNUM;
3708 }
3709
3710 // We use the Intel Architecture Code Analyzer (IACA) to measure the
3711 // throughput and use that as the cost.
3712
3713 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3714 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
3715 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
3716 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
3717 };
3718
3719 static const CostTblEntry SSE41CostTblNoPairWise[] = {
3720 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
3721 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
3722 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
3723 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
3724 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
3725 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
3726 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
3727 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
3728 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
3729 {ISD::SMIN, MVT::v16i8, 6},
3730 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
3731 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
3732 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
3733 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
3734 };
3735
3736 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3737 {ISD::SMIN, MVT::v16i16, 6},
3738 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
3739 {ISD::SMIN, MVT::v32i8, 8},
3740 {ISD::UMIN, MVT::v32i8, 8},
3741 };
3742
3743 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
3744 {ISD::SMIN, MVT::v32i16, 8},
3745 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
3746 {ISD::SMIN, MVT::v64i8, 10},
3747 {ISD::UMIN, MVT::v64i8, 10},
3748 };
3749
3750 // Before legalizing the type, give a chance to look up illegal narrow types
3751 // in the table.
3752 // FIXME: Is there a better way to do this?
3753 EVT VT = TLI->getValueType(DL, ValTy);
3754 if (VT.isSimple()) {
3755 MVT MTy = VT.getSimpleVT();
3756 if (ST->hasBWI())
3757 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3758 return Entry->Cost;
3759
3760 if (ST->hasAVX())
3761 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3762 return Entry->Cost;
3763
3764 if (ST->hasSSE41())
3765 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3766 return Entry->Cost;
3767
3768 if (ST->hasSSE2())
3769 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3770 return Entry->Cost;
3771 }
3772
3773 auto *ValVTy = cast<FixedVectorType>(ValTy);
3774 unsigned NumVecElts = ValVTy->getNumElements();
3775
3776 auto *Ty = ValVTy;
3777 unsigned MinMaxCost = 0;
3778 if (LT.first != 1 && MTy.isVector() &&
3779 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3780 // Type needs to be split. We need LT.first - 1 operations.
3781 Ty = FixedVectorType::get(ValVTy->getElementType(),
3782 MTy.getVectorNumElements());
3783 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
3784 MTy.getVectorNumElements());
3785 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3786 MinMaxCost *= LT.first - 1;
3787 NumVecElts = MTy.getVectorNumElements();
3788 }
3789
3790 if (ST->hasBWI())
3791 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3792 return MinMaxCost + Entry->Cost;
3793
3794 if (ST->hasAVX())
3795 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3796 return MinMaxCost + Entry->Cost;
3797
3798 if (ST->hasSSE41())
3799 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3800 return MinMaxCost + Entry->Cost;
3801
3802 if (ST->hasSSE2())
3803 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3804 return MinMaxCost + Entry->Cost;
3805
3806 unsigned ScalarSize = ValTy->getScalarSizeInBits();
3807
3808 // Special case power of 2 reductions where the scalar type isn't changed
3809 // by type legalization.
3810 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
3811 ScalarSize != MTy.getScalarSizeInBits())
3812 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3813 CostKind);
3814
3815 // Now handle reduction with the legal type, taking into account size changes
3816 // at each level.
3817 while (NumVecElts > 1) {
3818 // Determine the size of the remaining vector we need to reduce.
3819 unsigned Size = NumVecElts * ScalarSize;
3820 NumVecElts /= 2;
3821 // If we're reducing from 256/512 bits, use an extract_subvector.
3822 if (Size > 128) {
3823 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3824 MinMaxCost +=
3825 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3826 Ty = SubTy;
3827 } else if (Size == 128) {
3828 // Reducing from 128 bits is a permute of v2f64/v2i64.
3829 VectorType *ShufTy;
3830 if (ValTy->isFloatingPointTy())
3831 ShufTy =
3832 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
3833 else
3834 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
3835 MinMaxCost +=
3836 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3837 } else if (Size == 64) {
3838 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3839 FixedVectorType *ShufTy;
3840 if (ValTy->isFloatingPointTy())
3841 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
3842 else
3843 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
3844 MinMaxCost +=
3845 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3846 } else {
3847 // Reducing from smaller size is a shift by immediate.
3848 auto *ShiftTy = FixedVectorType::get(
3849 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
3850 MinMaxCost += getArithmeticInstrCost(
3851 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
3852 TargetTransformInfo::OK_AnyValue,
3853 TargetTransformInfo::OK_UniformConstantValue,
3854 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3855 }
3856
3857 // Add the arithmetic op for this level.
3858 auto *SubCondTy =
3859 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
3860 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3861 }
3862
3863 // Add the final extract element to the cost.
3864 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3865}
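
The reduction loop above halves NumVecElts each level and classifies the step by the remaining width (Size > 128: extract_subvector; == 128: v2i64/v2f64 permute; == 64: v4i32/v4f32 shuffle; smaller: shift by immediate), adding one min/max per level and a final extractelement. A minimal standalone sketch (illustrative only, not part of this file) of that schedule for an assumed v16i32 reduction:

    #include <cstdio>

    int main() {
      unsigned NumVecElts = 16, ScalarSize = 32; // assumption: a v16i32 min/max reduction
      while (NumVecElts > 1) {
        unsigned Size = NumVecElts * ScalarSize;
        NumVecElts /= 2;
        const char *Step = Size > 128    ? "extract_subvector"
                           : Size == 128 ? "permute of v2i64/v2f64"
                           : Size == 64  ? "shuffle of v4i32/v4f32"
                                         : "shift by immediate";
        // Each level costs one such step plus one min/max on the narrower vector.
        std::printf("%4u bits -> %s, then minmax on %u elements\n", Size, Step, NumVecElts);
      }
      return 0; // the final extractelement is charged after the loop
    }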
3866
3867/// Calculate the cost of materializing a 64-bit value. This helper
3868/// method might only calculate a fraction of a larger immediate. Therefore it
3869/// is valid to return a cost of ZERO.
3870int X86TTIImpl::getIntImmCost(int64_t Val) {
3871 if (Val == 0)
3872 return TTI::TCC_Free;
3873
3874 if (isInt<32>(Val))
3875 return TTI::TCC_Basic;
3876
3877 return 2 * TTI::TCC_Basic;
3878}
3879
3880int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
3881 TTI::TargetCostKind CostKind) {
3882 assert(Ty->isIntegerTy());
3883
3884 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3885 if (BitSize == 0)
3886 return ~0U;
3887
3888 // Never hoist constants larger than 128bit, because this might lead to
3889 // incorrect code generation or assertions in codegen.
3890 // Fixme: Create a cost model for types larger than i128 once the codegen
3891 // issues have been fixed.
3892 if (BitSize > 128)
3893 return TTI::TCC_Free;
3894
3895 if (Imm == 0)
3896 return TTI::TCC_Free;
3897
3898 // Sign-extend all constants to a multiple of 64-bit.
3899 APInt ImmVal = Imm;
3900 if (BitSize % 64 != 0)
3901 ImmVal = Imm.sext(alignTo(BitSize, 64));
3902
3903 // Split the constant into 64-bit chunks and calculate the cost for each
3904 // chunk.
3905 int Cost = 0;
3906 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
3907 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
3908 int64_t Val = Tmp.getSExtValue();
3909 Cost += getIntImmCost(Val);
3910 }
3911 // We need at least one instruction to materialize the constant.
3912 return std::max(1, Cost);
3913}
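
A self-contained sketch (illustrative only) of the chunking scheme above, using two 64-bit words to stand in for a 128-bit immediate: each 64-bit chunk is costed as free (zero), one basic op (fits in i32), or two basic ops, and at least one instruction is always charged.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Per-chunk rule mirroring X86TTIImpl::getIntImmCost(int64_t).
    static int chunkCost(int64_t Val) {
      if (Val == 0) return 0;                             // TCC_Free
      if (Val >= INT32_MIN && Val <= INT32_MAX) return 1; // TCC_Basic
      return 2;                                           // 2 * TCC_Basic
    }

    int main() {
      // Hypothetical 128-bit immediate split into low/high 64-bit chunks.
      int64_t Chunks[2] = {0x1234567890abcdefLL, 0x1LL};
      int Cost = 0;
      for (int64_t C : Chunks)
        Cost += chunkCost(C);
      std::printf("materialization cost = %d\n", std::max(1, Cost));
      return 0;
    }

With these example chunks the low word costs 2 and the high word costs 1, so the sketch prints 3.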
3914
3915int X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
3916 const APInt &Imm, Type *Ty,
3917 TTI::TargetCostKind CostKind,
3918 Instruction *Inst) {
3919 assert(Ty->isIntegerTy());
3920
3921 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3922 // There is no cost model for constants with a bit size of 0. Return TCC_Free
3923 // here, so that constant hoisting will ignore this constant.
3924 if (BitSize == 0)
3925 return TTI::TCC_Free;
3926
3927 unsigned ImmIdx = ~0U;
3928 switch (Opcode) {
3929 default:
3930 return TTI::TCC_Free;
3931 case Instruction::GetElementPtr:
3932 // Always hoist the base address of a GetElementPtr. This prevents the
3933 // creation of new constants for every base constant that gets constant
3934 // folded with the offset.
3935 if (Idx == 0)
3936 return 2 * TTI::TCC_Basic;
3937 return TTI::TCC_Free;
3938 case Instruction::Store:
3939 ImmIdx = 0;
3940 break;
3941 case Instruction::ICmp:
3942 // This is an imperfect hack to prevent constant hoisting of
3943 // compares that might be trying to check if a 64-bit value fits in
3944 // 32-bits. The backend can optimize these cases using a right shift by 32.
3945 // Ideally we would check the compare predicate here. There are also other
3946 // similar immediates the backend can use shifts for.
3947 if (Idx == 1 && Imm.getBitWidth() == 64) {
3948 uint64_t ImmVal = Imm.getZExtValue();
3949 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
3950 return TTI::TCC_Free;
3951 }
3952 ImmIdx = 1;
3953 break;
3954 case Instruction::And:
3955 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
3956 // by using a 32-bit operation with implicit zero extension. Detect such
3957 // immediates here as the normal path expects bit 31 to be sign extended.
3958 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
3959 return TTI::TCC_Free;
3960 ImmIdx = 1;
3961 break;
3962 case Instruction::Add:
3963 case Instruction::Sub:
3964 // For add/sub, we can use the opposite instruction for INT32_MIN.
3965 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
3966 return TTI::TCC_Free;
3967 ImmIdx = 1;
3968 break;
3969 case Instruction::UDiv:
3970 case Instruction::SDiv:
3971 case Instruction::URem:
3972 case Instruction::SRem:
3973 // Division by constant is typically expanded later into a different
3974 // instruction sequence. This completely changes the constants.
3975 // Report them as "free" to stop ConstantHoist from marking them as opaque.
3976 return TTI::TCC_Free;
3977 case Instruction::Mul:
3978 case Instruction::Or:
3979 case Instruction::Xor:
3980 ImmIdx = 1;
3981 break;
3982 // Always return TCC_Free for the shift value of a shift instruction.
3983 case Instruction::Shl:
3984 case Instruction::LShr:
3985 case Instruction::AShr:
3986 if (Idx == 1)
3987 return TTI::TCC_Free;
3988 break;
3989 case Instruction::Trunc:
3990 case Instruction::ZExt:
3991 case Instruction::SExt:
3992 case Instruction::IntToPtr:
3993 case Instruction::PtrToInt:
3994 case Instruction::BitCast:
3995 case Instruction::PHI:
3996 case Instruction::Call:
3997 case Instruction::Select:
3998 case Instruction::Ret:
3999 case Instruction::Load:
4000 break;
4001 }
4002
4003 if (Idx == ImmIdx) {
4004 int NumConstants = divideCeil(BitSize, 64);
4005 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4006 return (Cost <= NumConstants * TTI::TCC_Basic)
4007 ? static_cast<int>(TTI::TCC_Free)
4008 : Cost;
4009 }
4010
4011 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4012}
4013
4014int X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4015 const APInt &Imm, Type *Ty,
4016 TTI::TargetCostKind CostKind) {
4017 assert(Ty->isIntegerTy());
4018
4019 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4020 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4021 // here, so that constant hoisting will ignore this constant.
4022 if (BitSize == 0)
4023 return TTI::TCC_Free;
4024
4025 switch (IID) {
4026 default:
4027 return TTI::TCC_Free;
4028 case Intrinsic::sadd_with_overflow:
4029 case Intrinsic::uadd_with_overflow:
4030 case Intrinsic::ssub_with_overflow:
4031 case Intrinsic::usub_with_overflow:
4032 case Intrinsic::smul_with_overflow:
4033 case Intrinsic::umul_with_overflow:
4034 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4035 return TTI::TCC_Free;
4036 break;
4037 case Intrinsic::experimental_stackmap:
4038 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4039 return TTI::TCC_Free;
4040 break;
4041 case Intrinsic::experimental_patchpoint_void:
4042 case Intrinsic::experimental_patchpoint_i64:
4043 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4044 return TTI::TCC_Free;
4045 break;
4046 }
4047 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4048}
4049
4050unsigned
4051X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
4052 if (CostKind != TTI::TCK_RecipThroughput)
4053 return Opcode == Instruction::PHI ? 0 : 1;
4054 // Branches are assumed to be predicted.
4055 return CostKind == TTI::TCK_RecipThroughput ? 0 : 1;
4056}
4057
4058int X86TTIImpl::getGatherOverhead() const {
4059 // Some CPUs have more overhead for gather. The specified overhead is relative
4060 // to the Load operation. "2" is the number provided by Intel architects. This
4061 // parameter is used for cost estimation of Gather Op and comparison with
4062 // other alternatives.
4063 // TODO: Remove the explicit hasAVX512()? That would mean we would only
4064 // enable gather with a -march.
4065 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4066 return 2;
4067
4068 return 1024;
4069}
4070
4071int X86TTIImpl::getScatterOverhead() const {
4072 if (ST->hasAVX512())
4073 return 2;
4074
4075 return 1024;
4076}
4077
4078// Return an average cost of Gather / Scatter instruction, maybe improved later
4079int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, const Value *Ptr,
4080 Align Alignment, unsigned AddressSpace) {
4081
4082 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4083 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4084
4085 // Try to reduce index size from 64 bit (default for GEP)
4086 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4087 // operation will use 16 x 64 indices, which do not fit in a zmm and need
4088 // to be split. Also check that the base pointer is the same for all lanes,
4089 // and that there's at most one variable index.
4090 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4091 unsigned IndexSize = DL.getPointerSizeInBits();
4092 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4093 if (IndexSize < 64 || !GEP)
4094 return IndexSize;
4095
4096 unsigned NumOfVarIndices = 0;
4097 const Value *Ptrs = GEP->getPointerOperand();
4098 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4099 return IndexSize;
4100 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4101 if (isa<Constant>(GEP->getOperand(i)))
4102 continue;
4103 Type *IndxTy = GEP->getOperand(i)->getType();
4104 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4105 IndxTy = IndexVTy->getElementType();
4106 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4107 !isa<SExtInst>(GEP->getOperand(i))) ||
4108 ++NumOfVarIndices > 1)
4109 return IndexSize; // 64
4110 }
4111 return (unsigned)32;
4112 };
4113
4114 // Try to reduce IndexSize to 32 bits for a 16-element vector.
4115 // By default the IndexSize is equal to pointer size.
4116 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4117 ? getIndexSizeInBits(Ptr, DL)
4118 : DL.getPointerSizeInBits();
4119
4120 auto *IndexVTy = FixedVectorType::get(
4121 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4122 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
4123 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
4124 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
4125 if (SplitFactor > 1) {
4126 // Handle splitting of vector of pointers
4127 auto *SplitSrcTy =
4128 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4129 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4130 AddressSpace);
4131 }
4132
4133 // The gather / scatter cost is given by Intel architects. It is a rough
4134 // number since we are looking at one instruction at a time.
4135 const int GSOverhead = (Opcode == Instruction::Load)
4136 ? getGatherOverhead()
4137 : getScatterOverhead();
4138 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4139 MaybeAlign(Alignment), AddressSpace,
4140 TTI::TCK_RecipThroughput);
4141}
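
To see why the index-width reduction above matters: with AVX-512 and VF = 16, 64-bit indices occupy 16 x 64 = 1024 bits and legalize to two zmm registers, so the gather must be split, while 32-bit indices fit in a single 512-bit register. A small sketch of that arithmetic (assuming a 512-bit register width; not part of this file):

    #include <cstdio>

    int main() {
      const unsigned RegBits = 512; // zmm width on an AVX-512 target (assumption)
      const unsigned VF = 16;
      const unsigned IndexSizes[] = {64, 32};
      for (unsigned IndexSize : IndexSizes) {
        unsigned Bits = VF * IndexSize;
        unsigned Split = (Bits + RegBits - 1) / RegBits; // registers needed
        std::printf("VF=%u, i%u indices: %u bits -> split factor %u\n",
                    VF, IndexSize, Bits, Split);
      }
      return 0;
    }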
4142
4143/// Return the cost of full scalarization of gather / scatter operation.
4144///
4145/// Opcode - Load or Store instruction.
4146/// SrcVTy - The type of the data vector that should be gathered or scattered.
4147/// VariableMask - The mask is non-constant at compile time.
4148/// Alignment - Alignment for one element.
4149/// AddressSpace - pointer[s] address space.
4150///
4151int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4152 bool VariableMask, Align Alignment,
4153 unsigned AddressSpace) {
4154 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4155 APInt DemandedElts = APInt::getAllOnesValue(VF);
4156 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4157
4158 int MaskUnpackCost = 0;
4159 if (VariableMask) {
4160 auto *MaskTy =
4161 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4162 MaskUnpackCost =
4163 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4164 int ScalarCompareCost = getCmpSelInstrCost(
4165 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4166 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4167 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4168 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4169 }
4170
4171 // The cost of the scalar loads/stores.
4172 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4173 MaybeAlign(Alignment), AddressSpace,
4174 CostKind);
4175
4176 int InsertExtractCost = 0;
4177 if (Opcode == Instruction::Load)
4178 for (unsigned i = 0; i < VF; ++i)
4179 // Add the cost of inserting each scalar load into the vector
4180 InsertExtractCost +=
4181 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4182 else
4183 for (unsigned i = 0; i < VF; ++i)
4184 // Add the cost of extracting each element out of the data vector
4185 InsertExtractCost +=
4186 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4187
4188 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4189}
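
The scalarized cost above is the sum of three pieces: VF scalar memory ops, the mask-unpack work when the mask is variable (extracting each mask bit plus a compare and branch per lane), and VF inserts (for a gather) or extracts (for a scatter). A rough sketch with placeholder unit costs (assumptions, not the real numbers produced by the TTI hooks above):

    #include <cstdio>

    int main() {
      // Illustrative unit costs only; the real values come from the cost model.
      const int ScalarLoad = 1, ScalarCmp = 1, Branch = 0, InsertElt = 1,
                MaskExtract = 1;
      const int VF = 4;
      const bool VariableMask = true;

      int MaskUnpack =
          VariableMask ? VF * MaskExtract + VF * (ScalarCmp + Branch) : 0;
      int Memory = VF * ScalarLoad;
      int InsertExtract = VF * InsertElt; // inserts for a gather, extracts for a scatter
      std::printf("scalarized gather cost ~ %d\n",
                  Memory + MaskUnpack + InsertExtract);
      return 0;
    }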
4190
4191/// Calculate the cost of Gather / Scatter operation
4192int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
4193 const Value *Ptr, bool VariableMask,
4194 Align Alignment,
4195 TTI::TargetCostKind CostKind,
4196 const Instruction *I = nullptr) {
4197
4198 if (CostKind != TTI::TCK_RecipThroughput)
4199 return 1;
4200
4201 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4202 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4203 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4204 if (!PtrTy && Ptr->getType()->isVectorTy())
4205 PtrTy = dyn_cast<PointerType>(
4206 cast<VectorType>(Ptr->getType())->getElementType());
4207 assert(PtrTy && "Unexpected type for Ptr argument");
4208 unsigned AddressSpace = PtrTy->getAddressSpace();
4209
4210 bool Scalarize = false;
4211 if ((Opcode == Instruction::Load &&
4212 !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4213 (Opcode == Instruction::Store &&
4214 !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4215 Scalarize = true;
4216 // Gather / Scatter for a 2-element vector is not profitable on KNL / SKX.
4217 // A 4-element gather/scatter instruction does not exist on KNL.
4218 // We can extend it to 8 elements, but zeroing upper bits of
4219 // the mask vector will add more instructions. Right now we give the scalar
4220 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction
4221 // is better in the VariableMask case.
4222 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
4223 Scalarize = true;
4224
4225 if (Scalarize)
4226 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4227 AddressSpace);
4228
4229 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4230}
4231
4232bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4233 TargetTransformInfo::LSRCost &C2) {
4234 // X86-specific here: instruction count gets first priority.
4235 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4236 C1.NumIVMuls, C1.NumBaseAdds,
4237 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4238 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4239 C2.NumIVMuls, C2.NumBaseAdds,
4240 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4241}
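
The std::tie comparison above is lexicographic, so a candidate with fewer instructions wins regardless of the remaining fields; register count is only a tie-breaker, and so on down the tuple. A tiny illustration with made-up cost tuples:

    #include <cstdio>
    #include <tuple>

    int main() {
      // (Insns, NumRegs, AddRecCost) -- fewer instructions wins outright,
      // even though this candidate uses more registers.
      auto C1 = std::make_tuple(3, 7, 1);
      auto C2 = std::make_tuple(4, 2, 0);
      std::printf("C1 preferred over C2: %s\n", C1 < C2 ? "yes" : "no"); // yes
      return 0;
    }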
4242
4243bool X86TTIImpl::canMacroFuseCmp() {
4244 return ST->hasMacroFusion() || ST->hasBranchFusion();
4245}
4246
4247bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4248 if (!ST->hasAVX())
4249 return false;
4250
4251 // The backend can't handle a single element vector.
4252 if (isa<VectorType>(DataTy) &&
4253 cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4254 return false;
4255 Type *ScalarTy = DataTy->getScalarType();
4256
4257 if (ScalarTy->isPointerTy())
4258 return true;
4259
4260 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4261 return true;
4262
4263 if (!ScalarTy->isIntegerTy())
4264 return false;
4265
4266 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4267 return IntWidth == 32 || IntWidth == 64 ||
4268 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4269}
4270
4271bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4272 return isLegalMaskedLoad(DataType, Alignment);
4273}
4274
4275bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4276 unsigned DataSize = DL.getTypeStoreSize(DataType);
4277 // The only supported nontemporal loads are for aligned vectors of 16 or 32
4278 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
4279 // (the equivalent stores only require AVX).
4280 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4281 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
4282
4283 return false;
4284}
4285
4286bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4287 unsigned DataSize = DL.getTypeStoreSize(DataType);
4288
4289 // SSE4A supports nontemporal stores of float and double at arbitrary
4290 // alignment.
4291 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4292 return true;
4293
4294 // Besides the SSE4A subtarget exception above, only aligned stores are
4295 // available nontemporally on any other subtarget. And only stores with a size
4296 // of 4..32 bytes (powers of 2 only) are permitted.
4297 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4298 !isPowerOf2_32(DataSize))
4299 return false;
4300
4301 // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4302 // loads require AVX2).
4303 if (DataSize == 32)
4304 return ST->hasAVX();
4305 else if (DataSize == 16)
4306 return ST->hasSSE1();
4307 return true;
4308}
4309
4310bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4311 if (!isa<VectorType>(DataTy))
4312 return false;
4313
4314 if (!ST->hasAVX512())
4315 return false;
4316
4317 // The backend can't handle a single element vector.
4318 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4319 return false;
4320
4321 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4322
4323 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4324 return true;
4325
4326 if (!ScalarTy->isIntegerTy())
4327 return false;
4328
4329 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4330 return IntWidth == 32 || IntWidth == 64 ||
4331 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4332}
4333
4334bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4335 return isLegalMaskedExpandLoad(DataTy);
4336}
4337
4338bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4339 // Some CPUs have better gather performance than others.
4340 // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4341 // enable gather with a -march.
4342 if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4343 return false;
4344
4345 // This function is called now in two cases: from the Loop Vectorizer
4346 // and from the Scalarizer.
4347 // When the Loop Vectorizer asks about legality of the feature,
4348 // the vectorization factor is not calculated yet. The Loop Vectorizer
4349 // sends a scalar type and the decision is based on the width of the
4350 // scalar element.
4351 // Later on, the cost model will estimate usage of this intrinsic based on
4352 // the vector type.
4353 // The Scalarizer asks again about legality. It sends a vector type.
4354 // In this case we can reject non-power-of-2 vectors.
4355 // We also reject single element vectors as the type legalizer can't
4356 // scalarize them.
4357 if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4358 unsigned NumElts = DataVTy->getNumElements();
4359 if (NumElts == 1)
4360 return false;
4361 }
4362 Type *ScalarTy = DataTy->getScalarType();
4363 if (ScalarTy->isPointerTy())
4364 return true;
4365
4366 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4367 return true;
4368
4369 if (!ScalarTy->isIntegerTy())
4370 return false;
4371
4372 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4373 return IntWidth == 32 || IntWidth == 64;
4374}
4375
4376bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4377 // AVX2 doesn't support scatter
4378 if (!ST->hasAVX512())
4379 return false;
4380 return isLegalMaskedGather(DataType, Alignment);
4381}
4382
4383bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4384 EVT VT = TLI->getValueType(DL, DataType);
4385 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4386}
4387
4388bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4389 return false;
4390}
4391
4392bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4393 const Function *Callee) const {
4394 const TargetMachine &TM = getTLI()->getTargetMachine();
4395
4396 // Work this as a subsetting of subtarget features.
4397 const FeatureBitset &CallerBits =
4398 TM.getSubtargetImpl(*Caller)->getFeatureBits();
4399 const FeatureBitset &CalleeBits =
4400 TM.getSubtargetImpl(*Callee)->getFeatureBits();
4401
4402 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4403 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4404 return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4405}
4406
4407bool X86TTIImpl::areFunctionArgsABICompatible(
4408 const Function *Caller, const Function *Callee,
4409 SmallPtrSetImpl<Argument *> &Args) const {
4410 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4411 return false;
4412
4413 // If we get here, we know the target features match. If one function
4414 // considers 512-bit vectors legal and the other does not, consider them
4415 // incompatible.
4416 const TargetMachine &TM = getTLI()->getTargetMachine();
4417
4418 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4419 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4420 return true;
4421
4422 // Consider the arguments compatible if they aren't vectors or aggregates.
4423 // FIXME: Look at the size of vectors.
4424 // FIXME: Look at the element types of aggregates to see if there are vectors.
4425 // FIXME: The API of this function seems intended to allow arguments
4426 // to be removed from the set, but the caller doesn't check if the set
4427 // becomes empty so that may not work in practice.
4428 return llvm::none_of(Args, [](Argument *A) {
4429 auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4430 return EltTy->isVectorTy() || EltTy->isAggregateType();
4431 });
4432}
4433
4434X86TTIImpl::TTI::MemCmpExpansionOptions
4435X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4436 TTI::MemCmpExpansionOptions Options;
4437 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4438 Options.NumLoadsPerBlock = 2;
4439 // All GPR and vector loads can be unaligned.
4440 Options.AllowOverlappingLoads = true;
4441 if (IsZeroCmp) {
4442 // Only enable vector loads for equality comparison. Right now the vector
4443 // version is not as fast for three way compare (see #33329).
4444 const unsigned PreferredWidth = ST->getPreferVectorWidth();
4445 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4446 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4447 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4448 }
4449 if (ST->is64Bit()) {
4450 Options.LoadSizes.push_back(8);
4451 }
4452 Options.LoadSizes.push_back(4);
4453 Options.LoadSizes.push_back(2);
4454 Options.LoadSizes.push_back(1);
4455 return Options;
4456}
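
Reading the code above for a hypothetical 64-bit AVX2 subtarget with a 256-bit preferred vector width, an equality-only comparison ends up with LoadSizes = {32, 16, 8, 4, 2, 1}, overlapping loads allowed, and two loads per block. A sketch that just replays that selection (the subtarget flags are assumptions for illustration):

    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical subtarget: 64-bit, AVX2 (no AVX-512), prefers 256-bit vectors.
      bool Is64Bit = true, HasAVX512 = false, HasAVX = true, HasSSE2 = true;
      unsigned PreferredWidth = 256;
      bool IsZeroCmp = true; // equality comparison only

      std::vector<unsigned> LoadSizes;
      if (IsZeroCmp) {
        if (PreferredWidth >= 512 && HasAVX512) LoadSizes.push_back(64);
        if (PreferredWidth >= 256 && HasAVX)    LoadSizes.push_back(32);
        if (PreferredWidth >= 128 && HasSSE2)   LoadSizes.push_back(16);
      }
      if (Is64Bit) LoadSizes.push_back(8);
      LoadSizes.push_back(4);
      LoadSizes.push_back(2);
      LoadSizes.push_back(1);

      for (unsigned S : LoadSizes) std::printf("%u ", S);
      std::printf("\n"); // prints: 32 16 8 4 2 1
      return 0;
    }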
4457
4458bool X86TTIImpl::enableInterleavedAccessVectorization() {
4459 // TODO: We expect this to be beneficial regardless of arch,
4460 // but there are currently some unexplained performance artifacts on Atom.
4461 // As a temporary solution, disable on Atom.
4462 return !(ST->isAtom());
4463}
4464
4465// Get estimation for interleaved load/store operations for AVX2.
4466// \p Factor is the interleaved-access factor (stride) - number of
4467// (interleaved) elements in the group.
4468// \p Indices contains the indices for a strided load: when the
4469// interleaved load has gaps they indicate which elements are used.
4470// If Indices is empty (or if the number of indices is equal to the size
4471// of the interleaved-access as given in \p Factor) the access has no gaps.
4472//
4473// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
4474// computing the cost using a generic formula as a function of generic
4475// shuffles. We therefore use a lookup table instead, filled according to
4476// the instruction sequences that codegen currently generates.
4477int X86TTIImpl::getInterleavedMemoryOpCostAVX2(
4478 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4479 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4480 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4481
4482 if (UseMaskForCond || UseMaskForGaps)
4483 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4484 Alignment, AddressSpace, CostKind,
4485 UseMaskForCond, UseMaskForGaps);
4486
4487 // We currently support only fully-interleaved groups, with no gaps.
4488 // TODO: Support also strided loads (interleaved-groups with gaps).
4489 if (Indices.size() && Indices.size() != Factor)
4490 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4491 Alignment, AddressSpace,
4492 CostKind);
4493
4494 // VecTy for interleave memop is <VF*Factor x Elt>.
4495 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4496 // VecTy = <12 x i32>.
4497 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4498
4499 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
4500 // the VF=2, while v2i128 is an unsupported MVT vector type
4501 // (see MachineValueType.h::getVectorVT()).
4502 if (!LegalVT.isVector())
4503 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4504 Alignment, AddressSpace,
4505 CostKind);
4506
4507 unsigned VF = VecTy->getNumElements() / Factor;
4508 Type *ScalarTy = VecTy->getElementType();
4509
4510 // Calculate the number of memory operations (NumOfMemOps) required
4511 // to load/store the VecTy.
4512 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4513 unsigned LegalVTSize = LegalVT.getStoreSize();
4514 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4515
4516 // Get the cost of one memory operation.
4517 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4518 LegalVT.getVectorNumElements());
4519 unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
4520 MaybeAlign(Alignment), AddressSpace,
4521 CostKind);
4522
4523 auto *VT = FixedVectorType::get(ScalarTy, VF);
4524 EVT ETy = TLI->getValueType(DL, VT);
4525 if (!ETy.isSimple())
4526 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4527 Alignment, AddressSpace,
4528 CostKind);
4529
4530 // TODO: Complete for other data-types and strides.
4531 // Each combination of Stride, ElementTy and VF results in a different
4532 // sequence; The cost tables are therefore accessed with:
4533 // Factor (stride) and VectorType=VFxElemType.
4534 // The Cost accounts only for the shuffle sequence;
4535 // The cost of the loads/stores is accounted for separately.
4536 //
4537 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
4538 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
4539 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64
4540
4541 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8
4542 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8
4543 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8
4544 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8
4545 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8
4546 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32
4547
4548 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8
4549 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8
4550 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8
4551 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8
4552 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
4553
4554 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32
4555 };
4556
4557 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
4558 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
4559 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store)
4560
4561 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store)
4562 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store)
4563 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store)
4564 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
4565 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
4566
4567 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store)
4568 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store)
4569 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store)
4570 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
4571 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store)
4572 };
4573
4574 if (Opcode == Instruction::Load) {
4575 if (const auto *Entry =
4576 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
4577 return NumOfMemOps * MemOpCost + Entry->Cost;
4578 } else {
4579 assert(Opcode == Instruction::Store &&
4580 "Expected Store Instruction at this point");
4581 if (const auto *Entry =
4582 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
4583 return NumOfMemOps * MemOpCost + Entry->Cost;
4584 }
4585
4586 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4587 Alignment, AddressSpace, CostKind);
4588}
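
Worked example of the AVX2 load path above, using the {2, MVT::v4i64, 6} table entry: for VecTy = <8 x i64> with Factor = 2, the wide vector legalizes to v4i64, so NumOfMemOps = ceil(64 bytes / 32 bytes) = 2 and the returned cost is 2 * MemOpCost + 6. A sketch with a placeholder MemOpCost of 1 (the real value comes from getMemoryOpCost):

    #include <cstdio>

    int main() {
      // Hypothetical query: load of <8 x i64>, Factor = 2 (so VF = 4, ETy = v4i64).
      unsigned VecTyBytes = 8 * 8;  // <8 x i64> store size
      unsigned LegalVTBytes = 32;   // v4i64 is the widest legal type on AVX2
      unsigned NumOfMemOps = (VecTyBytes + LegalVTBytes - 1) / LegalVTBytes; // = 2
      unsigned MemOpCost = 1;       // placeholder; really from getMemoryOpCost
      unsigned TableCost = 6;       // AVX2InterleavedLoadTbl entry {2, v4i64, 6}
      std::printf("cost = %u\n", NumOfMemOps * MemOpCost + TableCost); // = 8
      return 0;
    }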
4589
4590// Get estimation for interleaved load/store operations and strided load.
4591// \p Indices contains indices for strided load.
4592// \p Factor - the factor of interleaving.
4593 // AVX-512 provides 3-src shuffles that significantly reduce the cost.
4594int X86TTIImpl::getInterleavedMemoryOpCostAVX512(
4595 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4596 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4597 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4598
4599 if (UseMaskForCond || UseMaskForGaps)
4600 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4601 Alignment, AddressSpace, CostKind,
4602 UseMaskForCond, UseMaskForGaps);
4603
4604 // VecTy for interleave memop is <VF*Factor x Elt>.
4605 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4606 // VecTy = <12 x i32>.
4607
4608 // Calculate the number of memory operations (NumOfMemOps) required
4609 // to load/store the VecTy.
4610 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4611 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4612 unsigned LegalVTSize = LegalVT.getStoreSize();
4613 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4614
4615 // Get the cost of one memory operation.
4616 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4617 LegalVT.getVectorNumElements());
4618 unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
4619 MaybeAlign(Alignment), AddressSpace,
4620 CostKind);
4621
4622 unsigned VF = VecTy->getNumElements() / Factor;
4623 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
4624
4625 if (Opcode == Instruction::Load) {
4626 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
4627 // contain the cost of the optimized shuffle sequence that the
4628 // X86InterleavedAccess pass will generate.
4629 // The cost of loads and stores are computed separately from the table.
4630
4631 // X86InterleavedAccess supports only the following interleaved-access groups.
4632 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
4633 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
4634 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
4635 {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
4636 };
4637
4638 if (const auto *Entry =
4639 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
4640 return NumOfMemOps * MemOpCost + Entry->Cost;
4641 // If an entry does not exist, fall back to the default implementation.
4642
4643 // The kind of shuffle depends on the number of loaded values.
4644 // If we load the entire data in one register, we can use a 1-src shuffle.
4645 // Otherwise, we'll merge 2 sources in each operation.
4646 TTI::ShuffleKind ShuffleKind =
4647 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
4648
4649 unsigned ShuffleCost =
4650 getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
4651
4652 unsigned NumOfLoadsInInterleaveGrp =
4653 Indices.size() ? Indices.size() : Factor;
4654 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
4655 VecTy->getNumElements() / Factor);
4656 unsigned NumOfResults =
4657 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
4658 NumOfLoadsInInterleaveGrp;
4659
4660 // About half of the loads may be folded into shuffles when we have only
4661 // one result. If we have more than one result, we do not fold loads at all.
4662 unsigned NumOfUnfoldedLoads =
4663 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
4664
4665 // Get a number of shuffle operations per result.
4666 unsigned NumOfShufflesPerResult =
4667 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
4668
4669 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
4670 // When we have more than one destination, we need additional instructions
4671 // to keep sources.
4672 unsigned NumOfMoves = 0;
4673 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
4674 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
4675
4676 int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
4677 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
4678
4679 return Cost;
4680 }
4681
4682 // Store.
4683 assert(Opcode == Instruction::Store &&
4684 "Expected Store Instruction at this point");
4685 // X86InterleavedAccess supports only the following interleaved-access groups.
4686 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
4687 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
4688 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
4689 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
4690
4691 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
4692 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
4693 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
4694 {4, MVT::v64i8, 24} // interleave 4 x 64i8 into 256i8 (and store)
4695 };
4696
4697 if (const auto *Entry =
4698 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
4699 return NumOfMemOps * MemOpCost + Entry->Cost;
4700 // If an entry does not exist, fall back to the default implementation.
4701
4702 // There are no strided stores at the moment, and a store can't be folded
4703 // into a shuffle.
4704 unsigned NumOfSources = Factor; // The number of values to be merged.
4705 unsigned ShuffleCost =
4706 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
4707 unsigned NumOfShufflesPerStore = NumOfSources - 1;
4708
4709 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
4710 // We need additional instructions to keep sources.
4711 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
4712 int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
4713 NumOfMoves;
4714 return Cost;
4715}
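
For a store that misses the AVX-512 table above, the fallback formula is NumOfMemOps * (MemOpCost + (Factor - 1) * ShuffleCost) + NumOfMoves, with NumOfMoves = NumOfMemOps * (Factor - 1) / 2. A sketch with placeholder inputs (Factor = 3, three memory ops, unit memory and shuffle costs are assumptions, not real cost-model values):

    #include <cstdio>

    int main() {
      // Placeholder inputs; the real values come from type legalization and the
      // memory/shuffle cost hooks.
      unsigned Factor = 3, NumOfMemOps = 3, MemOpCost = 1, ShuffleCost = 1;

      unsigned NumOfShufflesPerStore = Factor - 1; // merge Factor sources
      unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
      unsigned Cost =
          NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
          NumOfMoves;
      std::printf("interleaved store cost ~ %u\n", Cost); // 3*(1+2) + 3 = 12
      return 0;
    }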
4716
4717int X86TTIImpl::getInterleavedMemoryOpCost(
4718 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
4719 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
4720 bool UseMaskForCond, bool UseMaskForGaps) {
4721 auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
4722 Type *EltTy = cast<VectorType>(VecTy)->getElementType();
4723 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
4724 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
4725 return true;
4726 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
4727 return HasBW;
4728 return false;
4729 };
4730 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
1. Calling 'X86Subtarget::hasAVX512'
4. Returning from 'X86Subtarget::hasAVX512'
4731 return getInterleavedMemoryOpCostAVX512(
4732 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4733 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4734 if (ST->hasAVX2())
5. Calling 'X86Subtarget::hasAVX2'
8. Returning from 'X86Subtarget::hasAVX2'
9. Taking false branch
4735 return getInterleavedMemoryOpCostAVX2(
4736 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4737 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4738
4739 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
10. Calling 'BasicTTIImplBase::getInterleavedMemoryOpCost'
4740 Alignment, AddressSpace, CostKind,
4741 UseMaskForCond, UseMaskForGaps);
4742}

/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/X86/X86Subtarget.h

1//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the X86 specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
14#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H
15
16#include "X86FrameLowering.h"
17#include "X86ISelLowering.h"
18#include "X86InstrInfo.h"
19#include "X86SelectionDAGInfo.h"
20#include "llvm/ADT/Triple.h"
21#include "llvm/CodeGen/TargetSubtargetInfo.h"
22#include "llvm/IR/CallingConv.h"
23#include <climits>
24#include <memory>
25
26#define GET_SUBTARGETINFO_HEADER
27#include "X86GenSubtargetInfo.inc"
28
29namespace llvm {
30
31class CallLowering;
32class GlobalValue;
33class InstructionSelector;
34class LegalizerInfo;
35class RegisterBankInfo;
36class StringRef;
37class TargetMachine;
38
39/// The X86 backend supports a number of different styles of PIC.
40///
41namespace PICStyles {
42
43enum class Style {
44 StubPIC, // Used on i386-darwin in pic mode.
45 GOT, // Used on 32-bit ELF when in pic mode.
46 RIPRel, // Used on X86-64 when in pic mode.
47 None // Set when not in pic mode.
48};
49
50} // end namespace PICStyles
51
52class X86Subtarget final : public X86GenSubtargetInfo {
53 // NOTE: Do not add anything new to this list. Coarse, CPU name based flags
54 // are not a good idea. We should be migrating away from these.
55 enum X86ProcFamilyEnum {
56 Others,
57 IntelAtom,
58 IntelSLM
59 };
60
61 enum X86SSEEnum {
62 NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
63 };
64
65 enum X863DNowEnum {
66 NoThreeDNow, MMX, ThreeDNow, ThreeDNowA
67 };
68
69 /// X86 processor family: Intel Atom, and others
70 X86ProcFamilyEnum X86ProcFamily = Others;
71
72 /// Which PIC style to use
73 PICStyles::Style PICStyle;
74
75 const TargetMachine &TM;
76
77 /// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
78 X86SSEEnum X86SSELevel = NoSSE;
79
80 /// MMX, 3DNow, 3DNow Athlon, or none supported.
81 X863DNowEnum X863DNowLevel = NoThreeDNow;
82
83 /// True if the processor supports X87 instructions.
84 bool HasX87 = false;
85
86 /// True if the processor supports CMPXCHG8B.
87 bool HasCmpxchg8b = false;
88
89 /// True if this processor has NOPL instruction
90 /// (generally pentium pro+).
91 bool HasNOPL = false;
92
93 /// True if this processor has conditional move instructions
94 /// (generally pentium pro+).
95 bool HasCMov = false;
96
97 /// True if the processor supports X86-64 instructions.
98 bool HasX86_64 = false;
99
100 /// True if the processor supports POPCNT.
101 bool HasPOPCNT = false;
102
103 /// True if the processor supports SSE4A instructions.
104 bool HasSSE4A = false;
105
106 /// Target has AES instructions
107 bool HasAES = false;
108 bool HasVAES = false;
109
110 /// Target has FXSAVE/FXRESTOR instructions
111 bool HasFXSR = false;
112
113 /// Target has XSAVE instructions
114 bool HasXSAVE = false;
115
116 /// Target has XSAVEOPT instructions
117 bool HasXSAVEOPT = false;
118
119 /// Target has XSAVEC instructions
120 bool HasXSAVEC = false;
121
122 /// Target has XSAVES instructions
123 bool HasXSAVES = false;
124
125 /// Target has carry-less multiplication
126 bool HasPCLMUL = false;
127 bool HasVPCLMULQDQ = false;
128
129 /// Target has Galois Field Arithmetic instructions
130 bool HasGFNI = false;
131
132 /// Target has 3-operand fused multiply-add
133 bool HasFMA = false;
134
135 /// Target has 4-operand fused multiply-add
136 bool HasFMA4 = false;
137
138 /// Target has XOP instructions
139 bool HasXOP = false;
140
141 /// Target has TBM instructions.
142 bool HasTBM = false;
143
144 /// Target has LWP instructions
145 bool HasLWP = false;
146
147 /// True if the processor has the MOVBE instruction.
148 bool HasMOVBE = false;
149
150 /// True if the processor has the RDRAND instruction.
151 bool HasRDRAND = false;
152
153 /// Processor has 16-bit floating point conversion instructions.
154 bool HasF16C = false;
155
156 /// Processor has FS/GS base instructions.
157 bool HasFSGSBase = false;
158
159 /// Processor has LZCNT instruction.
160 bool HasLZCNT = false;
161
162 /// Processor has BMI1 instructions.
163 bool HasBMI = false;
164
165 /// Processor has BMI2 instructions.
166 bool HasBMI2 = false;
167
168 /// Processor has VBMI instructions.
169 bool HasVBMI = false;
170
171 /// Processor has VBMI2 instructions.
172 bool HasVBMI2 = false;
173
174 /// Processor has Integer Fused Multiply Add
175 bool HasIFMA = false;
176
177 /// Processor has RTM instructions.
178 bool HasRTM = false;
179
180 /// Processor has ADX instructions.
181 bool HasADX = false;
182
183 /// Processor has SHA instructions.
184 bool HasSHA = false;
185
186 /// Processor has PRFCHW instructions.
187 bool HasPRFCHW = false;
188
189 /// Processor has RDSEED instructions.
190 bool HasRDSEED = false;
191
192 /// Processor has LAHF/SAHF instructions in 64-bit mode.
193 bool HasLAHFSAHF64 = false;
194
195 /// Processor has MONITORX/MWAITX instructions.
196 bool HasMWAITX = false;
197
198 /// Processor has Cache Line Zero instruction
199 bool HasCLZERO = false;
200
201 /// Processor has Cache Line Demote instruction
202 bool HasCLDEMOTE = false;
203
204 /// Processor has MOVDIRI instruction (direct store integer).
205 bool HasMOVDIRI = false;
206
207 /// Processor has MOVDIR64B instruction (direct store 64 bytes).
208 bool HasMOVDIR64B = false;
209
210 /// Processor has ptwrite instruction.
211 bool HasPTWRITE = false;
212
213 /// Processor has Prefetch with intent to Write instruction
214 bool HasPREFETCHWT1 = false;
215
216 /// True if SHLD instructions are slow.
217 bool IsSHLDSlow = false;
218
219 /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
220 // PMULUDQ.
221 bool IsPMULLDSlow = false;
222
223 /// True if the PMADDWD instruction is slow compared to PMULLD.
224 bool IsPMADDWDSlow = false;
225
226 /// True if unaligned memory accesses of 16-bytes are slow.
227 bool IsUAMem16Slow = false;
228
229 /// True if unaligned memory accesses of 32-bytes are slow.
230 bool IsUAMem32Slow = false;
231
232 /// True if SSE operations can have unaligned memory operands.
233 /// This may require setting a configuration bit in the processor.
234 bool HasSSEUnalignedMem = false;
235
236 /// True if this processor has the CMPXCHG16B instruction;
237 /// this is true for most x86-64 chips, but not the first AMD chips.
238 bool HasCmpxchg16b = false;
239
240 /// True if the LEA instruction should be used for adjusting
241 /// the stack pointer. This is an optimization for Intel Atom processors.
242 bool UseLeaForSP = false;
243
244 /// True if POPCNT instruction has a false dependency on the destination register.
245 bool HasPOPCNTFalseDeps = false;
246
247 /// True if LZCNT/TZCNT instructions have a false dependency on the destination register.
248 bool HasLZCNTFalseDeps = false;
249
250 /// True if it's preferable to combine to a single shuffle using a variable
251 /// mask over multiple fixed shuffles.
252 bool HasFastVariableShuffle = false;
253
254 /// True if vzeroupper instructions should be inserted after code that uses
255 /// ymm or zmm registers.
256 bool InsertVZEROUPPER = false;
257
258 /// True if there is no performance penalty for writing NOPs with up to
259 /// 7 bytes.
260 bool HasFast7ByteNOP = false;
261
262 /// True if there is no performance penalty for writing NOPs with up to
263 /// 11 bytes.
264 bool HasFast11ByteNOP = false;
265
266 /// True if there is no performance penalty for writing NOPs with up to
267 /// 15 bytes.
268 bool HasFast15ByteNOP = false;
269
270 /// True if gather is reasonably fast. This is true for Skylake client and
271 /// all AVX-512 CPUs.
272 bool HasFastGather = false;
273
274 /// True if hardware SQRTSS instruction is at least as fast (latency) as
275 /// RSQRTSS followed by a Newton-Raphson iteration.
276 bool HasFastScalarFSQRT = false;
277
278 /// True if hardware SQRTPS/VSQRTPS instructions are at least as fast
279 /// (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration.
280 bool HasFastVectorFSQRT = false;
281
282 /// True if 8-bit divisions are significantly faster than
283 /// 32-bit divisions and should be used when possible.
284 bool HasSlowDivide32 = false;
285
286 /// True if 32-bit divides are significantly faster than
287 /// 64-bit divisions and should be used when possible.
288 bool HasSlowDivide64 = false;
289
290 /// True if LZCNT instruction is fast.
291 bool HasFastLZCNT = false;
292
293 /// True if SHLD based rotate is fast.
294 bool HasFastSHLDRotate = false;
295
296 /// True if the processor supports macrofusion.
297 bool HasMacroFusion = false;
298
299 /// True if the processor supports branch fusion.
300 bool HasBranchFusion = false;
301
302 /// True if the processor has enhanced REP MOVSB/STOSB.
303 bool HasERMSB = false;
304
305 /// True if the processor has fast short REP MOV.
306 bool HasFSRM = false;
307
308 /// True if the short functions should be padded to prevent
309 /// a stall when returning too early.
310 bool PadShortFunctions = false;
311
312 /// True if two memory operand instructions should use a temporary register
313 /// instead.
314 bool SlowTwoMemOps = false;
315
316 /// True if the LEA instruction inputs have to be ready at address generation
317 /// (AG) time.
318 bool LEAUsesAG = false;
319
320 /// True if the LEA instruction with certain arguments is slow
321 bool SlowLEA = false;
322
323 /// True if the LEA instruction has all three source operands: base, index,
324 /// and offset or if the LEA instruction uses base and index registers where
325 /// the base is EBP, RBP, or R13.
326 bool Slow3OpsLEA = false;
327
328 /// True if INC and DEC instructions are slow when writing to flags
329 bool SlowIncDec = false;
330
331 /// Processor has AVX-512 PreFetch Instructions
332 bool HasPFI = false;
333
334 /// Processor has AVX-512 Exponential and Reciprocal Instructions
335 bool HasERI = false;
336
337 /// Processor has AVX-512 Conflict Detection Instructions
338 bool HasCDI = false;
339
340 /// Processor has AVX-512 population count Instructions
341 bool HasVPOPCNTDQ = false;
342
343 /// Processor has AVX-512 Doubleword and Quadword instructions
344 bool HasDQI = false;
345
346 /// Processor has AVX-512 Byte and Word instructions
347 bool HasBWI = false;
348
349 /// Processor has AVX-512 Vector Length eXtensions
350 bool HasVLX = false;
351
353 /// Processor has PKU extensions
353 bool HasPKU = false;
354
355 /// Processor has AVX-512 Vector Neural Network Instructions
356 bool HasVNNI = false;
357
358 /// Processor has AVX Vector Neural Network Instructions
359 bool HasAVXVNNI = false;
360
361 /// Processor has AVX-512 bfloat16 floating-point extensions
362 bool HasBF16 = false;
363
364 /// Processor supports ENQCMD instructions
365 bool HasENQCMD = false;
366
367 /// Processor has AVX-512 Bit Algorithms instructions
368 bool HasBITALG = false;
369
370 /// Processor has AVX-512 vp2intersect instructions
371 bool HasVP2INTERSECT = false;
372
373 /// Processor supports CET SHSTK - Control-Flow Enforcement Technology
374 /// using Shadow Stack
375 bool HasSHSTK = false;
376
377 /// Processor supports Invalidate Process-Context Identifier
378 bool HasINVPCID = false;
379
380 /// Processor has Software Guard Extensions
381 bool HasSGX = false;
382
383 /// Processor supports Flush Cache Line instruction
384 bool HasCLFLUSHOPT = false;
385
386 /// Processor supports Cache Line Write Back instruction
387 bool HasCLWB = false;
388
389 /// Processor supports Write Back No Invalidate instruction
390 bool HasWBNOINVD = false;
391
392 /// Processor supports RDPID instruction
393 bool HasRDPID = false;
394
395 /// Processor supports WaitPKG instructions
396 bool HasWAITPKG = false;
397
398 /// Processor supports PCONFIG instruction
399 bool HasPCONFIG = false;
400
401 /// Processor supports key locker instructions
402 bool HasKL = false;
403
404 /// Processor supports key locker wide instructions
405 bool HasWIDEKL = false;
406
407 /// Processor supports HRESET instruction
408 bool HasHRESET = false;
409
410 /// Processor supports SERIALIZE instruction
411 bool HasSERIALIZE = false;
412
413 /// Processor supports TSXLDTRK instruction
414 bool HasTSXLDTRK = false;
415
416 /// Processor has AMX support
417 bool HasAMXTILE = false;
418 bool HasAMXBF16 = false;
419 bool HasAMXINT8 = false;
420
421 /// Processor supports User Level Interrupt instructions
422 bool HasUINTR = false;
423
424 /// Processor has a single uop BEXTR implementation.
425 bool HasFastBEXTR = false;
426
427 /// Try harder to combine to horizontal vector ops if they are fast.
428 bool HasFastHorizontalOps = false;
429
430 /// Prefer a left/right scalar logical shifts pair over a shift+and pair.
431 bool HasFastScalarShiftMasks = false;
432
433 /// Prefer a left/right vector logical shifts pair over a shift+and pair.
434 bool HasFastVectorShiftMasks = false;
435
436 /// Use a retpoline thunk rather than indirect calls to block speculative
437 /// execution.
438 bool UseRetpolineIndirectCalls = false;
439
440 /// Use a retpoline thunk or remove any indirect branch to block speculative
441 /// execution.
442 bool UseRetpolineIndirectBranches = false;
443
444 /// Deprecated flag, query `UseRetpolineIndirectCalls` and
445 /// `UseRetpolineIndirectBranches` instead.
446 bool DeprecatedUseRetpoline = false;
447
448 /// When using a retpoline thunk, call an externally provided thunk rather
449 /// than emitting one inside the compiler.
450 bool UseRetpolineExternalThunk = false;
451
452 /// Prevent generation of indirect call/branch instructions from memory,
453 /// and force all indirect call/branch instructions from a register to be
454 /// preceded by an LFENCE. Also decompose RET instructions into a
455 /// POP+LFENCE+JMP sequence.
456 bool UseLVIControlFlowIntegrity = false;
457
458 /// Enable Speculative Execution Side Effect Suppression
459 bool UseSpeculativeExecutionSideEffectSuppression = false;
460
461 /// Insert LFENCE instructions to prevent data speculatively injected into
462 /// loads from being used maliciously.
463 bool UseLVILoadHardening = false;
464
465 /// Use software floating point for code generation.
466 bool UseSoftFloat = false;
467
468 /// Use alias analysis during code generation.
469 bool UseAA = false;
470
471 /// The minimum alignment known to hold of the stack frame on
472 /// entry to the function and which must be maintained by every function.
473 Align stackAlignment = Align(4);
474
475 /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
476 ///
477 // FIXME: this is a known good value for Yonah. How about others?
478 unsigned MaxInlineSizeThreshold = 128;
479
480 /// Indicates target prefers 128 bit instructions.
481 bool Prefer128Bit = false;
482
483 /// Indicates target prefers 256 bit instructions.
484 bool Prefer256Bit = false;
485
486 /// Indicates target prefers AVX512 mask registers.
487 bool PreferMaskRegisters = false;
488
489 /// Use Goldmont specific floating point div/sqrt costs.
490 bool UseGLMDivSqrtCosts = false;
491
492 /// What processor and OS we're targeting.
493 Triple TargetTriple;
494
495 /// GlobalISel related APIs.
496 std::unique_ptr<CallLowering> CallLoweringInfo;
497 std::unique_ptr<LegalizerInfo> Legalizer;
498 std::unique_ptr<RegisterBankInfo> RegBankInfo;
499 std::unique_ptr<InstructionSelector> InstSelector;
500
501private:
502 /// Override the stack alignment.
503 MaybeAlign StackAlignOverride;
504
505 /// Preferred vector width from function attribute.
506 unsigned PreferVectorWidthOverride;
507
508 /// Resolved preferred vector width from function attribute and subtarget
509 /// features.
510 unsigned PreferVectorWidth = UINT32_MAX;
511
512 /// Required vector width from function attribute.
513 unsigned RequiredVectorWidth;
514
515 /// True if compiling for 64-bit, false for 16-bit or 32-bit.
516 bool In64BitMode = false;
517
518 /// True if compiling for 32-bit, false for 16-bit or 64-bit.
519 bool In32BitMode = false;
520
521 /// True if compiling for 16-bit, false for 32-bit or 64-bit.
522 bool In16BitMode = false;
523
524 X86SelectionDAGInfo TSInfo;
525 // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
526 // X86TargetLowering needs.
527 X86InstrInfo InstrInfo;
528 X86TargetLowering TLInfo;
529 X86FrameLowering FrameLowering;
530
531public:
532 /// This constructor initializes the data members to match that
533 /// of the specified triple.
534 ///
535 X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS,
536 const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
537 unsigned PreferVectorWidthOverride,
538 unsigned RequiredVectorWidth);
539
540 const X86TargetLowering *getTargetLowering() const override {
541 return &TLInfo;
542 }
543
544 const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }
545
546 const X86FrameLowering *getFrameLowering() const override {
547 return &FrameLowering;
548 }
549
550 const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
551 return &TSInfo;
552 }
553
554 const X86RegisterInfo *getRegisterInfo() const override {
555 return &getInstrInfo()->getRegisterInfo();
556 }
557
558 /// Returns the minimum alignment known to hold of the
559 /// stack frame on entry to the function and which must be maintained by every
560 /// function for this subtarget.
561 Align getStackAlignment() const { return stackAlignment; }
562
563 /// Returns the maximum memset / memcpy size
564 /// that still makes it profitable to inline the call.
565 unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }
566
567 /// ParseSubtargetFeatures - Parses features string setting specified
568 /// subtarget options. Definition of function is auto generated by tblgen.
569 void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
570
571 /// Methods used by Global ISel
572 const CallLowering *getCallLowering() const override;
573 InstructionSelector *getInstructionSelector() const override;
574 const LegalizerInfo *getLegalizerInfo() const override;
575 const RegisterBankInfo *getRegBankInfo() const override;
576
577private:
578 /// Initialize the full set of dependencies so we can use an initializer
579 /// list for X86Subtarget.
580 X86Subtarget &initializeSubtargetDependencies(StringRef CPU,
581 StringRef TuneCPU,
582 StringRef FS);
583 void initSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
584
585public:
586 /// Is this x86_64? (disregarding specific ABI / programming model)
587 bool is64Bit() const {
588 return In64BitMode;
589 }
590
591 bool is32Bit() const {
592 return In32BitMode;
593 }
594
595 bool is16Bit() const {
596 return In16BitMode;
597 }
598
599 /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
600 bool isTarget64BitILP32() const {
601 return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
602 TargetTriple.isOSNaCl());
603 }
604
605 /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
606 bool isTarget64BitLP64() const {
607 return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32 &&
608 !TargetTriple.isOSNaCl());
609 }
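For intuition, the two ABI predicates above key off the environment component of the triple. A minimal sketch that roughly mirrors them (looksLikeX32 is a hypothetical helper, assuming the llvm::Triple API as of this snapshot, not code from this header):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/Triple.h"

  // Hypothetical helper: true for x86_64 triples that select the ILP32 (x32)
  // model, e.g. "x86_64-unknown-linux-gnux32", which parses with environment
  // GNUX32; NaCl is treated the same way, as in the predicate above.
  static bool looksLikeX32(llvm::StringRef TripleStr) {
    llvm::Triple TT(TripleStr);
    return TT.getArch() == llvm::Triple::x86_64 &&
           (TT.getEnvironment() == llvm::Triple::GNUX32 || TT.isOSNaCl());
  }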
610
611 PICStyles::Style getPICStyle() const { return PICStyle; }
612 void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
613
614 bool hasX87() const { return HasX87; }
615 bool hasCmpxchg8b() const { return HasCmpxchg8b; }
616 bool hasNOPL() const { return HasNOPL; }
617 // SSE codegen depends on cmovs, and all SSE1+ processors support them.
618 // All 64-bit processors support cmov.
619 bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
620 bool hasSSE1() const { return X86SSELevel >= SSE1; }
621 bool hasSSE2() const { return X86SSELevel >= SSE2; }
622 bool hasSSE3() const { return X86SSELevel >= SSE3; }
623 bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
624 bool hasSSE41() const { return X86SSELevel >= SSE41; }
625 bool hasSSE42() const { return X86SSELevel >= SSE42; }
626 bool hasAVX() const { return X86SSELevel >= AVX; }
627 bool hasAVX2() const { return X86SSELevel >= AVX2; }
6. Assuming field 'X86SSELevel' is < AVX2
7. Returning zero, which participates in a condition later
628 bool hasAVX512() const { return X86SSELevel >= AVX512F; }
2. Assuming field 'X86SSELevel' is < AVX512F
3. Returning zero, which participates in a condition later
629 bool hasInt256() const { return hasAVX2(); }
630 bool hasSSE4A() const { return HasSSE4A; }
631 bool hasMMX() const { return X863DNowLevel >= MMX; }
632 bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
633 bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
634 bool hasPOPCNT() const { return HasPOPCNT; }
635 bool hasAES() const { return HasAES; }
636 bool hasVAES() const { return HasVAES; }
637 bool hasFXSR() const { return HasFXSR; }
638 bool hasXSAVE() const { return HasXSAVE; }
639 bool hasXSAVEOPT() const { return HasXSAVEOPT; }
640 bool hasXSAVEC() const { return HasXSAVEC; }
641 bool hasXSAVES() const { return HasXSAVES; }
642 bool hasPCLMUL() const { return HasPCLMUL; }
643 bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
644 bool hasGFNI() const { return HasGFNI; }
645 // Prefer FMA4 to FMA - it's better for commutation/memory folding and
646 // has equal or better performance on all supported targets.
647 bool hasFMA() const { return HasFMA; }
648 bool hasFMA4() const { return HasFMA4; }
649 bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
650 bool hasXOP() const { return HasXOP; }
651 bool hasTBM() const { return HasTBM; }
652 bool hasLWP() const { return HasLWP; }
653 bool hasMOVBE() const { return HasMOVBE; }
654 bool hasRDRAND() const { return HasRDRAND; }
655 bool hasF16C() const { return HasF16C; }
656 bool hasFSGSBase() const { return HasFSGSBase; }
657 bool hasLZCNT() const { return HasLZCNT; }
658 bool hasBMI() const { return HasBMI; }
659 bool hasBMI2() const { return HasBMI2; }
660 bool hasVBMI() const { return HasVBMI; }
661 bool hasVBMI2() const { return HasVBMI2; }
662 bool hasIFMA() const { return HasIFMA; }
663 bool hasRTM() const { return HasRTM; }
664 bool hasADX() const { return HasADX; }
665 bool hasSHA() const { return HasSHA; }
666 bool hasPRFCHW() const { return HasPRFCHW; }
667 bool hasPREFETCHWT1() const { return HasPREFETCHWT1; }
668 bool hasPrefetchW() const {
669 // The PREFETCHW instruction was added with 3DNow but later CPUs gave it
670 // its own CPUID bit as part of deprecating 3DNow. Intel eventually added
671 // it and KNL has another that prefetches to L2 cache. We assume the
672 // L1 version exists if the L2 version does.
673 return has3DNow() || hasPRFCHW() || hasPREFETCHWT1();
674 }
675 bool hasSSEPrefetch() const {
676 // We implicitly enable these when we have a write prefix supporting cache
677 // level OR if we have prfchw, but don't already have a read prefetch from
678 // 3dnow.
679 return hasSSE1() || (hasPRFCHW() && !has3DNow()) || hasPREFETCHWT1();
680 }
681 bool hasRDSEED() const { return HasRDSEED; }
682 bool hasLAHFSAHF() const { return HasLAHFSAHF64 || !is64Bit(); }
683 bool hasMWAITX() const { return HasMWAITX; }
684 bool hasCLZERO() const { return HasCLZERO; }
685 bool hasCLDEMOTE() const { return HasCLDEMOTE; }
686 bool hasMOVDIRI() const { return HasMOVDIRI; }
687 bool hasMOVDIR64B() const { return HasMOVDIR64B; }
688 bool hasPTWRITE() const { return HasPTWRITE; }
689 bool isSHLDSlow() const { return IsSHLDSlow; }
690 bool isPMULLDSlow() const { return IsPMULLDSlow; }
691 bool isPMADDWDSlow() const { return IsPMADDWDSlow; }
692 bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
693 bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
694 bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
695 bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
696 bool useLeaForSP() const { return UseLeaForSP; }
697 bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
698 bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
699 bool hasFastVariableShuffle() const {
700 return HasFastVariableShuffle;
701 }
702 bool insertVZEROUPPER() const { return InsertVZEROUPPER; }
703 bool hasFastGather() const { return HasFastGather; }
704 bool hasFastScalarFSQRT() const { return HasFastScalarFSQRT; }
705 bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
706 bool hasFastLZCNT() const { return HasFastLZCNT; }
707 bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
708 bool hasFastBEXTR() const { return HasFastBEXTR; }
709 bool hasFastHorizontalOps() const { return HasFastHorizontalOps; }
710 bool hasFastScalarShiftMasks() const { return HasFastScalarShiftMasks; }
711 bool hasFastVectorShiftMasks() const { return HasFastVectorShiftMasks; }
712 bool hasMacroFusion() const { return HasMacroFusion; }
713 bool hasBranchFusion() const { return HasBranchFusion; }
714 bool hasERMSB() const { return HasERMSB; }
715 bool hasFSRM() const { return HasFSRM; }
716 bool hasSlowDivide32() const { return HasSlowDivide32; }
717 bool hasSlowDivide64() const { return HasSlowDivide64; }
718 bool padShortFunctions() const { return PadShortFunctions; }
719 bool slowTwoMemOps() const { return SlowTwoMemOps; }
720 bool LEAusesAG() const { return LEAUsesAG; }
721 bool slowLEA() const { return SlowLEA; }
722 bool slow3OpsLEA() const { return Slow3OpsLEA; }
723 bool slowIncDec() const { return SlowIncDec; }
724 bool hasCDI() const { return HasCDI; }
725 bool hasVPOPCNTDQ() const { return HasVPOPCNTDQ; }
726 bool hasPFI() const { return HasPFI; }
727 bool hasERI() const { return HasERI; }
728 bool hasDQI() const { return HasDQI; }
729 bool hasBWI() const { return HasBWI; }
730 bool hasVLX() const { return HasVLX; }
731 bool hasPKU() const { return HasPKU; }
732 bool hasVNNI() const { return HasVNNI; }
733 bool hasBF16() const { return HasBF16; }
734 bool hasVP2INTERSECT() const { return HasVP2INTERSECT; }
735 bool hasBITALG() const { return HasBITALG; }
736 bool hasSHSTK() const { return HasSHSTK; }
737 bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
738 bool hasCLWB() const { return HasCLWB; }
739 bool hasWBNOINVD() const { return HasWBNOINVD; }
740 bool hasRDPID() const { return HasRDPID; }
741 bool hasWAITPKG() const { return HasWAITPKG; }
742 bool hasPCONFIG() const { return HasPCONFIG; }
743 bool hasSGX() const { return HasSGX; }
744 bool hasINVPCID() const { return HasINVPCID; }
745 bool hasENQCMD() const { return HasENQCMD; }
746 bool hasKL() const { return HasKL; }
747 bool hasWIDEKL() const { return HasWIDEKL; }
748 bool hasHRESET() const { return HasHRESET; }
749 bool hasSERIALIZE() const { return HasSERIALIZE; }
750 bool hasTSXLDTRK() const { return HasTSXLDTRK; }
751 bool hasUINTR() const { return HasUINTR; }
752 bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
753 bool useRetpolineIndirectBranches() const {
754 return UseRetpolineIndirectBranches;
755 }
756 bool hasAVXVNNI() const { return HasAVXVNNI; }
757 bool hasAMXTILE() const { return HasAMXTILE; }
758 bool hasAMXBF16() const { return HasAMXBF16; }
759 bool hasAMXINT8() const { return HasAMXINT8; }
760 bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
761
762 // These are generic getters that OR together all of the thunk types
763 // supported by the subtarget. Therefore useIndirectThunk*() will return true
764 // if any respective thunk feature is enabled.
765 bool useIndirectThunkCalls() const {
766 return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
767 }
768 bool useIndirectThunkBranches() const {
769 return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
770 }
771
772 bool preferMaskRegisters() const { return PreferMaskRegisters; }
773 bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
774 bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
775 bool useLVILoadHardening() const { return UseLVILoadHardening; }
776 bool useSpeculativeExecutionSideEffectSuppression() const {
777 return UseSpeculativeExecutionSideEffectSuppression;
778 }
779
780 unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
781 unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }
782
783 // Helper functions to determine when we should allow widening to 512-bit
784 // during codegen.
785 // TODO: Currently we're always allowing widening on CPUs without VLX,
786 // because for many cases we don't have a better option.
787 bool canExtendTo512DQ() const {
788 return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
789 }
790 bool canExtendTo512BW() const {
791 return hasBWI() && canExtendTo512DQ();
792 }
793
794 // If there are no 512-bit vectors and we prefer not to use 512-bit registers,
795 // disable them in the legalizer.
796 bool useAVX512Regs() const {
797 return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
798 }
799
800 bool useBWIRegs() const {
801 return hasBWI() && useAVX512Regs();
802 }
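As a usage illustration (pickVectorRegWidth is hypothetical and not part of this header; it only sketches how a cost model might consume the predicates above):

  // Sketch only: choose the widest vector register width, in bits, implied by
  // the subtarget predicates defined above.
  static unsigned pickVectorRegWidth(const llvm::X86Subtarget &ST) {
    if (ST.useAVX512Regs())
      return 512; // AVX-512 and (widening to 512 allowed || required width > 256).
    if (ST.hasAVX())
      return 256; // YMM registers are available.
    if (ST.hasSSE1())
      return 128; // Baseline XMM registers.
    return 0;     // Scalar only.
  }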
803
804 bool isXRaySupported() const override { return is64Bit(); }
805
806 /// TODO: to be removed later and replaced with suitable properties
807 bool isAtom() const { return X86ProcFamily == IntelAtom; }
808 bool isSLM() const { return X86ProcFamily == IntelSLM; }
809 bool useSoftFloat() const { return UseSoftFloat; }
810 bool useAA() const override { return UseAA; }
811
812 /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
813 /// no-sse2). There isn't any reason to disable it if the target processor
814 /// supports it.
815 bool hasMFence() const { return hasSSE2() || is64Bit(); }
816
817 const Triple &getTargetTriple() const { return TargetTriple; }
818
819 bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
820 bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
821 bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
822 bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
823 bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }
824
825 bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
826 bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
827 bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
828
829 bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
830 bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
831 bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
832 bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
833 bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
834 bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
835 bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
836 bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
837 bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }
838
839 bool isTargetWindowsMSVC() const {
840 return TargetTriple.isWindowsMSVCEnvironment();
841 }
842
843 bool isTargetWindowsCoreCLR() const {
844 return TargetTriple.isWindowsCoreCLREnvironment();
845 }
846
847 bool isTargetWindowsCygwin() const {
848 return TargetTriple.isWindowsCygwinEnvironment();
849 }
850
851 bool isTargetWindowsGNU() const {
852 return TargetTriple.isWindowsGNUEnvironment();
853 }
854
855 bool isTargetWindowsItanium() const {
856 return TargetTriple.isWindowsItaniumEnvironment();
857 }
858
859 bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
860
861 bool isOSWindows() const { return TargetTriple.isOSWindows(); }
862
863 bool isTargetWin64() const { return In64BitMode && isOSWindows(); }
864
865 bool isTargetWin32() const { return !In64BitMode && isOSWindows(); }
866
867 bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
868 bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }
869
870 bool isPICStyleStubPIC() const {
871 return PICStyle == PICStyles::Style::StubPIC;
872 }
873
874 bool isPositionIndependent() const;
875
876 bool isCallingConvWin64(CallingConv::ID CC) const {
877 switch (CC) {
878 // On Win64, all these conventions just use the default convention.
879 case CallingConv::C:
880 case CallingConv::Fast:
881 case CallingConv::Tail:
882 case CallingConv::Swift:
883 case CallingConv::X86_FastCall:
884 case CallingConv::X86_StdCall:
885 case CallingConv::X86_ThisCall:
886 case CallingConv::X86_VectorCall:
887 case CallingConv::Intel_OCL_BI:
888 return isTargetWin64();
889 // This convention allows using the Win64 convention on other targets.
890 case CallingConv::Win64:
891 return true;
892 // This convention allows using the SysV convention on Windows targets.
893 case CallingConv::X86_64_SysV:
894 return false;
895 // Otherwise, who knows what this is.
896 default:
897 return false;
898 }
899 }
900
901 /// Classify a global variable reference for the current subtarget according
902 /// to how we should reference it in a non-pcrel context.
903 unsigned char classifyLocalReference(const GlobalValue *GV) const;
904
905 unsigned char classifyGlobalReference(const GlobalValue *GV,
906 const Module &M) const;
907 unsigned char classifyGlobalReference(const GlobalValue *GV) const;
908
909 /// Classify a global function reference for the current subtarget.
910 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
911 const Module &M) const;
912 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
913
914 /// Classify a blockaddress reference for the current subtarget according to
915 /// how we should reference it in a non-pcrel context.
916 unsigned char classifyBlockAddressReference() const;
917
918 /// Return true if the subtarget allows calls to immediate address.
919 bool isLegalToCallImmediateAddr() const;
920
921 /// If we are using indirect thunks, we need to expand indirectbr to avoid it
922 /// lowering to an actual indirect jump.
923 bool enableIndirectBrExpand() const override {
924 return useIndirectThunkBranches();
925 }
926
927 /// Enable the MachineScheduler pass for all X86 subtargets.
928 bool enableMachineScheduler() const override { return true; }
929
930 bool enableEarlyIfConversion() const override;
931
932 void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
933 &Mutations) const override;
934
935 AntiDepBreakMode getAntiDepBreakMode() const override {
936 return TargetSubtargetInfo::ANTIDEP_CRITICAL;
937 }
938
939 bool enableAdvancedRASplitCost() const override { return true; }
940};
941
942} // end namespace llvm
943
944#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H

/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/include/llvm/CodeGen/BasicTTIImpl.h

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/TargetTransformInfo.h"
26#include "llvm/Analysis/TargetTransformInfoImpl.h"
27#include "llvm/CodeGen/ISDOpcodes.h"
28#include "llvm/CodeGen/TargetLowering.h"
29#include "llvm/CodeGen/TargetSubtargetInfo.h"
30#include "llvm/CodeGen/ValueTypes.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/IR/Type.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/CommandLine.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MachineValueType.h"
47#include "llvm/Support/MathExtras.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <limits>
52#include <utility>
53
54namespace llvm {
55
56class Function;
57class GlobalValue;
58class LLVMContext;
59class ScalarEvolution;
60class SCEV;
61class TargetMachine;
62
63extern cl::opt<unsigned> PartialUnrollingThreshold;
64
65/// Base class which can be used to help build a TTI implementation.
66///
67/// This class provides as much implementation of the TTI interface as is
68/// possible using the target independent parts of the code generator.
69///
70/// In order to subclass it, your class must implement a getST() method to
71/// return the subtarget, and a getTLI() method to return the target lowering.
72/// We need these methods implemented in the derived class so that this class
73/// doesn't have to duplicate storage for them.
74template <typename T>
75class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
76private:
77 using BaseT = TargetTransformInfoImplCRTPBase<T>;
78 using TTI = TargetTransformInfo;
79
80 /// Helper function to access this as a T.
81 T *thisT() { return static_cast<T *>(this); }
82
83 /// Estimate a cost of Broadcast as an extract and sequence of insert
84 /// operations.
85 unsigned getBroadcastShuffleOverhead(FixedVectorType *VTy) {
86 unsigned Cost = 0;
87 // Broadcast cost is equal to the cost of extracting the zero'th element
88 // plus the cost of inserting it into every element of the result vector.
89 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);
90
91 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
92 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
93 }
94 return Cost;
95 }
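Put as arithmetic (illustrative unit costs, not the real per-target tables): broadcasting an N-element vector is modelled as one extract of lane 0 plus N inserts, so N = 4 gives 1 + 4 = 5. A sketch of the same formula:

  // Sketch only: mirrors the loop above with assumed unit extract/insert costs.
  static unsigned broadcastCostSketch(unsigned NumElts) {
    const unsigned ExtractCost = 1, InsertCost = 1; // assumed, not queried
    return ExtractCost + NumElts * InsertCost;      // e.g. NumElts = 4 -> 5
  }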
96
97 /// Estimate a cost of shuffle as a sequence of extract and insert
98 /// operations.
99 unsigned getPermuteShuffleOverhead(FixedVectorType *VTy) {
100 unsigned Cost = 0;
101 // Shuffle cost is equal to the cost of extracting each element from its
102 // source vector plus the cost of inserting it into the result vector.
103
104 // e.g. a <4 x float> shuffle with mask <0,5,2,7> needs to extract from
105 // index 0 of the first vector, index 1 of the second vector, index 2 of the
106 // first vector and finally index 3 of the second vector, and insert them at
107 // indices <0,1,2,3> of the result vector.
108 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
109 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
110 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
111 }
112 return Cost;
113 }
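Under the same illustrative unit-cost assumption, a general permute of an N-element vector is modelled as N extracts plus N inserts, i.e. 2N; for the <4 x float> example above that is 8:

  // Sketch only: mirrors the loop above with assumed unit extract/insert costs.
  static unsigned permuteCostSketch(unsigned NumElts) {
    const unsigned ExtractCost = 1, InsertCost = 1; // assumed, not queried
    return NumElts * (ExtractCost + InsertCost);    // e.g. NumElts = 4 -> 8
  }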
114
115 /// Estimate a cost of subvector extraction as a sequence of extract and
116 /// insert operations.
117 unsigned getExtractSubvectorOverhead(FixedVectorType *VTy, int Index,
118 FixedVectorType *SubVTy) {
119 assert(VTy && SubVTy &&
120        "Can only extract subvectors from vectors");
121 int NumSubElts = SubVTy->getNumElements();
122 assert((Index + NumSubElts) <= (int)VTy->getNumElements() &&
123        "SK_ExtractSubvector index out of range");
124
125 unsigned Cost = 0;
126 // Subvector extraction cost is equal to the cost of extracting each element
127 // from the source vector type plus the cost of inserting it into the result
128 // (subvector) type.
129 for (int i = 0; i != NumSubElts; ++i) {
130 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
131 i + Index);
132 Cost +=
133 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
134 }
135 return Cost;
136 }
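With the same illustrative assumption, extracting an M-element subvector is modelled as M extracts from the wide vector plus M inserts into the subvector, independent of Index; e.g. pulling a <2 x i32> out of an <8 x i32> costs 2 + 2 = 4 under unit costs:

  // Sketch only: mirrors the loop above with assumed unit extract/insert costs.
  static unsigned extractSubvectorCostSketch(unsigned NumSubElts) {
    const unsigned ExtractCost = 1, InsertCost = 1; // assumed, not queried
    return NumSubElts * (ExtractCost + InsertCost); // e.g. NumSubElts = 2 -> 4
  }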
137
138 /// Estimate a cost of subvector insertion as a sequence of extract and
139 /// insert operations.
140 unsigned getInsertSubvectorOverhead(FixedVectorType *VTy, int Index,
141 FixedVectorType *SubVTy) {
142 assert(VTy && SubVTy &&
143        "Can only insert subvectors into vectors");
144 int NumSubElts = SubVTy->getNumElements();
145 assert((Index + NumSubElts) <= (int)VTy->getNumElements() &&
146        "SK_InsertSubvector index out of range");
147
148 unsigned Cost = 0;
149 // Subvector insertion cost is equal to the cost of extracting each element
150 // from the subvector type plus the cost of inserting it into the result
151 // vector type.
152 for (int i = 0; i != NumSubElts; ++i) {
153 Cost +=
154 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
155 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
156 i + Index);
157 }
158 return Cost;
159 }
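Subvector insertion is symmetric: M extracts from the subvector plus M inserts into the wide vector, with Index only choosing the destination lanes. A combined illustration (hypothetical values, unit costs assumed):

  // Sketch only: inserting a <2 x i32> subvector into an <8 x i32> at Index 4
  // is modelled as 2 extracts + 2 inserts = 4 under assumed unit costs.
  static unsigned insertSubvectorCostSketch(unsigned NumSubElts) {
    const unsigned ExtractCost = 1, InsertCost = 1; // assumed, not queried
    return NumSubElts * (ExtractCost + InsertCost);
  }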
160
161 /// Local query method delegates up to T which *must* implement this!
162 const TargetSubtargetInfo *getST() const {
163 return static_cast<const T *>(this)->getST();
164 }
165
166 /// Local query method delegates up to T which *must* implement this!
167 const TargetLoweringBase *getTLI() const {
168 return static_cast<const T *>(this)->getTLI();
169 }
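The two delegating accessors above are the contract stated in the class comment: the CRTP parameter T must provide getST() and getTLI(). A minimal sketch, with MyTTIImpl and its members purely hypothetical (real targets such as X86TTIImpl follow this shape through their own subtarget and lowering objects):

  class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
    friend class BasicTTIImplBase<MyTTIImpl>; // lets the base call the getters
    const TargetSubtargetInfo *ST;
    const TargetLoweringBase *TLI;

    // Required by BasicTTIImplBase (see the delegating methods above).
    const TargetSubtargetInfo *getST() const { return ST; }
    const TargetLoweringBase *getTLI() const { return TLI; }

  public:
    MyTTIImpl(const TargetMachine *TM, const DataLayout &DL,
              const TargetSubtargetInfo *ST, const TargetLoweringBase *TLI)
        : BasicTTIImplBase<MyTTIImpl>(TM, DL), ST(ST), TLI(TLI) {}
  };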
170
171 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
172 switch (M) {
173 case TTI::MIM_Unindexed:
174 return ISD::UNINDEXED;
175 case TTI::MIM_PreInc:
176 return ISD::PRE_INC;
177 case TTI::MIM_PreDec:
178 return ISD::PRE_DEC;
179 case TTI::MIM_PostInc:
180 return ISD::POST_INC;
181 case TTI::MIM_PostDec:
182 return ISD::POST_DEC;
183 }
184 llvm_unreachable("Unexpected MemIndexedMode");
185 }
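The switch above only translates the TTI enum into the matching ISD addressing mode; roughly (a paraphrased sketch assuming the usual TargetLoweringBase API, not a quotation from this header) the result is then fed to the target lowering when checking whether an indexed access is legal:

  // Sketch of a typical use: map the TTI mode, then ask the target whether
  // that addressing mode is legal for the type in question.
  static bool preIncLoadIsLegalSketch(const TargetLoweringBase *TLI, Type *Ty,
                                      const DataLayout &DL) {
    EVT VT = TLI->getValueType(DL, Ty);
    // TTI::MIM_PreInc maps to ISD::PRE_INC per the switch above.
    return TLI->isIndexedLoadLegal(ISD::PRE_INC, VT);
  }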
186
187protected:
188 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
189 : BaseT(DL) {}
190 virtual ~BasicTTIImplBase() = default;
191
192 using TargetTransformInfoImplBase::DL;
193
194public:
195 /// \name Scalar TTI Implementations
196 /// @{
197 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
198 unsigned AddressSpace, unsigned Alignment,
199 bool *Fast) const {
200 EVT E = EVT::getIntegerVT(Context, BitWidth);
201 return getTLI()->allowsMisalignedMemoryAccesses(
202 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
203 }
204
205 bool hasBranchDivergence() { return false; }
206
207 bool useGPUDivergenceAnalysis() { return false; }
208
209 bool isSourceOfDivergence(const Value *V) { return false; }
210
211 bool isAlwaysUniform(const Value *V) { return false; }
212
213 unsigned getFlatAddressSpace() {
214 // Return an invalid address space.
215 return -1;
216 }
217
218 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
219 Intrinsic::ID IID) const {
220 return false;
221 }
222
223 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
224 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
225