Bug Summary

File: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 3424, column 15
Division by zero

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86TargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/X86 -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

1//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// X86 target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15/// About the cost model numbers used below, it is necessary to say the following:
16/// the numbers correspond to some "generic" X86 CPU rather than to a concrete
17/// CPU model. Usually the numbers correspond to the CPU where the feature first
18/// appeared. For example, if we do Subtarget.hasSSE42() in the lookups below,
19/// the cost is based on Nehalem, as that was the first CPU to support that
20/// feature level and thus most likely has the worst case cost.
21/// Some examples of other technologies/CPUs:
22/// SSE 3 - Pentium4 / Athlon64
23/// SSE 4.1 - Penryn
24/// SSE 4.2 - Nehalem
25/// AVX - Sandy Bridge
26/// AVX2 - Haswell
27/// AVX-512 - Xeon Phi / Skylake
28/// And some examples of instruction target dependent costs (latency)
29///                     divss     sqrtss    rsqrtss
30///   AMD K7            11-16     19        3
31///   Piledriver        9-24      13-15     5
32///   Jaguar            14        16        2
33///   Pentium II,III    18        30        2
34///   Nehalem           7-14      7-18      3
35///   Haswell           10-13     11        5
36/// TODO: Develop and implement the target dependent cost model and
37/// specialize cost numbers for different Cost Model Targets such as throughput,
38/// code size, latency and uop count.
39//===----------------------------------------------------------------------===//
40
41#include "X86TargetTransformInfo.h"
42#include "llvm/Analysis/TargetTransformInfo.h"
43#include "llvm/CodeGen/BasicTTIImpl.h"
44#include "llvm/CodeGen/CostTable.h"
45#include "llvm/CodeGen/TargetLowering.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/Support/Debug.h"
48
49using namespace llvm;
50
51#define DEBUG_TYPE "x86tti"
52
53//===----------------------------------------------------------------------===//
54//
55// X86 cost model.
56//
57//===----------------------------------------------------------------------===//
58
59TargetTransformInfo::PopcntSupportKind
60X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62 // TODO: Currently the __builtin_popcount() implementation using SSE3
63 // instructions is inefficient. Once the problem is fixed, we should
64 // call ST->hasSSE3() instead of ST->hasPOPCNT().
65 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66}
67
68llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69 TargetTransformInfo::CacheLevel Level) const {
70 switch (Level) {
71 case TargetTransformInfo::CacheLevel::L1D:
72 // - Penryn
73 // - Nehalem
74 // - Westmere
75 // - Sandy Bridge
76 // - Ivy Bridge
77 // - Haswell
78 // - Broadwell
79 // - Skylake
80 // - Kabylake
81 return 32 * 1024; // 32 KByte
82 case TargetTransformInfo::CacheLevel::L2D:
83 // - Penryn
84 // - Nehalem
85 // - Westmere
86 // - Sandy Bridge
87 // - Ivy Bridge
88 // - Haswell
89 // - Broadwell
90 // - Skylake
91 // - Kabylake
92 return 256 * 1024; // 256 KByte
93 }
94
95  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96}
97
98llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99 TargetTransformInfo::CacheLevel Level) const {
100 // - Penryn
101 // - Nehalem
102 // - Westmere
103 // - Sandy Bridge
104 // - Ivy Bridge
105 // - Haswell
106 // - Broadwell
107 // - Skylake
108 // - Kabylake
109 switch (Level) {
110 case TargetTransformInfo::CacheLevel::L1D:
111    LLVM_FALLTHROUGH;
112 case TargetTransformInfo::CacheLevel::L2D:
113 return 8;
114 }
115
116  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117}
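
As an illustration of how the values modeled above surface to clients, the following minimal sketch queries them through the public TargetTransformInfo interface; the helper name and the assumption that a TargetTransformInfo reference is already available are illustrative, not part of this file.

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/Support/raw_ostream.h"

  // Illustrative only: read back the cache parameters modeled above.
  static void printX86CacheModel(const llvm::TargetTransformInfo &TTI) {
    using CL = llvm::TargetTransformInfo::CacheLevel;
    if (llvm::Optional<unsigned> L1 = TTI.getCacheSize(CL::L1D))
      llvm::errs() << "L1D size:  " << *L1 << " bytes\n";   // 32 KByte here
    if (llvm::Optional<unsigned> L2 = TTI.getCacheSize(CL::L2D))
      llvm::errs() << "L2D size:  " << *L2 << " bytes\n";   // 256 KByte here
    if (llvm::Optional<unsigned> A = TTI.getCacheAssociativity(CL::L1D))
      llvm::errs() << "L1D assoc: " << *A << "-way\n";      // 8-way here
  }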
118
119unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120 bool Vector = (ClassID == 1);
121 if (Vector && !ST->hasSSE1())
122 return 0;
123
124 if (ST->is64Bit()) {
125 if (Vector && ST->hasAVX512())
126 return 32;
127 return 16;
128 }
129 return 8;
130}
131
132TypeSize
133X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
134 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
135 switch (K) {
136 case TargetTransformInfo::RGK_Scalar:
137 return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
138 case TargetTransformInfo::RGK_FixedWidthVector:
139 if (ST->hasAVX512() && PreferVectorWidth >= 512)
140 return TypeSize::getFixed(512);
141 if (ST->hasAVX() && PreferVectorWidth >= 256)
142 return TypeSize::getFixed(256);
143 if (ST->hasSSE1() && PreferVectorWidth >= 128)
144 return TypeSize::getFixed(128);
145 return TypeSize::getFixed(0);
146 case TargetTransformInfo::RGK_ScalableVector:
147 return TypeSize::getScalable(0);
148 }
149
150  llvm_unreachable("Unsupported register kind");
151}
152
153unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
154 return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
155 .getFixedSize();
156}
157
158unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
159 // If the loop will not be vectorized, don't interleave the loop.
160  // Let the regular unroller unroll the loop instead, which saves the
161  // overflow check and memory check cost.
162 if (VF == 1)
163 return 1;
164
165 if (ST->isAtom())
166 return 1;
167
168 // Sandybridge and Haswell have multiple execution ports and pipelined
169 // vector units.
170 if (ST->hasAVX())
171 return 4;
172
173 return 2;
174}
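
To show where these hooks are consumed, here is a minimal, hypothetical sketch of the register and interleaving queries a vectorizer-like client would make. It assumes an existing TargetTransformInfo reference; the ClassID value 1 denotes the vector register class, matching getNumberOfRegisters above.

  #include "llvm/Analysis/TargetTransformInfo.h"

  // Illustrative only: the resource queries answered by the functions above.
  static void queryVectorResources(const llvm::TargetTransformInfo &TTI) {
    // 0 without SSE, 16 on x86-64, 32 with AVX-512 (see getNumberOfRegisters).
    unsigned NumVecRegs = TTI.getNumberOfRegisters(/*ClassID=*/1);
    // 128/256/512 bits depending on SSE/AVX/AVX-512 and the preferred width.
    llvm::TypeSize VecWidth =
        TTI.getRegisterBitWidth(llvm::TargetTransformInfo::RGK_FixedWidthVector);
    // 1 on Atom, 4 with AVX, otherwise 2 (see getMaxInterleaveFactor).
    unsigned MaxInterleave = TTI.getMaxInterleaveFactor(/*VF=*/4);
    (void)NumVecRegs;
    (void)VecWidth;
    (void)MaxInterleave;
  }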
175
176InstructionCost X86TTIImpl::getArithmeticInstrCost(
177 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
178 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
179 TTI::OperandValueProperties Opd1PropInfo,
180 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
181 const Instruction *CxtI) {
182 // TODO: Handle more cost kinds.
183 if (CostKind != TTI::TCK_RecipThroughput)
184 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
185 Op2Info, Opd1PropInfo,
186 Opd2PropInfo, Args, CxtI);
187
188 // vXi8 multiplications are always promoted to vXi16.
189 if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
190 Ty->getScalarSizeInBits() == 8) {
191 Type *WideVecTy =
192 VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
193 return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
194 TargetTransformInfo::CastContextHint::None,
195 CostKind) +
196 getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
197 TargetTransformInfo::CastContextHint::None,
198 CostKind) +
199 getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
200 Opd1PropInfo, Opd2PropInfo);
201 }
202
203 // Legalize the type.
204 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
205
206 int ISD = TLI->InstructionOpcodeToISD(Opcode);
207  assert(ISD && "Invalid opcode");
208
209 static const CostTblEntry GLMCostTable[] = {
210 { ISD::FDIV, MVT::f32, 18 }, // divss
211 { ISD::FDIV, MVT::v4f32, 35 }, // divps
212 { ISD::FDIV, MVT::f64, 33 }, // divsd
213 { ISD::FDIV, MVT::v2f64, 65 }, // divpd
214 };
215
216 if (ST->useGLMDivSqrtCosts())
217 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
218 LT.second))
219 return LT.first * Entry->Cost;
220
221 static const CostTblEntry SLMCostTable[] = {
222 { ISD::MUL, MVT::v4i32, 11 }, // pmulld
223 { ISD::MUL, MVT::v8i16, 2 }, // pmullw
224 { ISD::FMUL, MVT::f64, 2 }, // mulsd
225 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
226 { ISD::FMUL, MVT::v4f32, 2 }, // mulps
227 { ISD::FDIV, MVT::f32, 17 }, // divss
228 { ISD::FDIV, MVT::v4f32, 39 }, // divps
229 { ISD::FDIV, MVT::f64, 32 }, // divsd
230 { ISD::FDIV, MVT::v2f64, 69 }, // divpd
231 { ISD::FADD, MVT::v2f64, 2 }, // addpd
232 { ISD::FSUB, MVT::v2f64, 2 }, // subpd
233 // v2i64/v4i64 mul is custom lowered as a series of long:
234 // multiplies(3), shifts(3) and adds(2)
235 // slm muldq version throughput is 2 and addq throughput 4
236 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
237    //   2X4 (addq throughput) = 17
238 { ISD::MUL, MVT::v2i64, 17 },
239 // slm addq\subq throughput is 4
240 { ISD::ADD, MVT::v2i64, 4 },
241 { ISD::SUB, MVT::v2i64, 4 },
242 };
243
244 if (ST->isSLM()) {
245 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
246      // Check if the operands can be shrunk into a smaller datatype.
247 bool Op1Signed = false;
248 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
249 bool Op2Signed = false;
250 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
251
252 bool SignedMode = Op1Signed || Op2Signed;
253 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
254
255 if (OpMinSize <= 7)
256 return LT.first * 3; // pmullw/sext
257 if (!SignedMode && OpMinSize <= 8)
258 return LT.first * 3; // pmullw/zext
259 if (OpMinSize <= 15)
260 return LT.first * 5; // pmullw/pmulhw/pshuf
261 if (!SignedMode && OpMinSize <= 16)
262 return LT.first * 5; // pmullw/pmulhw/pshuf
263 }
264
265 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
266 LT.second)) {
267 return LT.first * Entry->Cost;
268 }
269 }
270
271 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
272 ISD == ISD::UREM) &&
273 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
274 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
275 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
276 if (ISD == ISD::SDIV || ISD == ISD::SREM) {
277 // On X86, vector signed division by constants power-of-two are
278 // normally expanded to the sequence SRA + SRL + ADD + SRA.
279 // The OperandValue properties may not be the same as that of the previous
280 // operation; conservatively assume OP_None.
281 InstructionCost Cost =
282 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
283 Op2Info, TargetTransformInfo::OP_None,
284 TargetTransformInfo::OP_None);
285 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
286 Op2Info,
287 TargetTransformInfo::OP_None,
288 TargetTransformInfo::OP_None);
289 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
290 Op2Info,
291 TargetTransformInfo::OP_None,
292 TargetTransformInfo::OP_None);
293
294 if (ISD == ISD::SREM) {
295 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
296 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
297 Op2Info);
298 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
299 Op2Info);
300 }
301
302 return Cost;
303 }
304
305 // Vector unsigned division/remainder will be simplified to shifts/masks.
306 if (ISD == ISD::UDIV)
307 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
308 Op1Info, Op2Info,
309 TargetTransformInfo::OP_None,
310 TargetTransformInfo::OP_None);
311
312 else // UREM
313 return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
314 Op1Info, Op2Info,
315 TargetTransformInfo::OP_None,
316 TargetTransformInfo::OP_None);
317 }
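
The SRA + SRL + ADD + SRA sequence mentioned in the comment above can be illustrated with IRBuilder. This is a hypothetical sketch for 32-bit values with 1 <= K < 32, not part of this file.

  #include "llvm/IR/IRBuilder.h"

  // Illustrative only: the expansion of 'X sdiv (1 << K)' for i32 values.
  static llvm::Value *emitSDivByPow2(llvm::IRBuilder<> &B, llvm::Value *X,
                                     unsigned K) {
    llvm::Value *Sign = B.CreateAShr(X, 31);        // SRA: broadcast the sign bit
    llvm::Value *Bias = B.CreateLShr(Sign, 32 - K); // SRL: keep the low K bits
    llvm::Value *Adj  = B.CreateAdd(X, Bias);       // ADD: bias negative dividends
    return B.CreateAShr(Adj, K);                    // SRA: perform the division
  }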
318
319 static const CostTblEntry AVX512BWUniformConstCostTable[] = {
320 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
321 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
322 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
323 };
324
325 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
326 ST->hasBWI()) {
327 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
328 LT.second))
329 return LT.first * Entry->Cost;
330 }
331
332 static const CostTblEntry AVX512UniformConstCostTable[] = {
333 { ISD::SRA, MVT::v2i64, 1 },
334 { ISD::SRA, MVT::v4i64, 1 },
335 { ISD::SRA, MVT::v8i64, 1 },
336
337 { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand.
338 { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand.
339 { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb.
340
341 { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
342 { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
343 { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
344 { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
345 };
346
347 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
348 ST->hasAVX512()) {
349 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
350 LT.second))
351 return LT.first * Entry->Cost;
352 }
353
354 static const CostTblEntry AVX2UniformConstCostTable[] = {
355 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
356 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
357 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.
358
359 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
360
361 { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
362 { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
363 { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
364 { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
365 };
366
367 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
368 ST->hasAVX2()) {
369 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
370 LT.second))
371 return LT.first * Entry->Cost;
372 }
373
374 static const CostTblEntry SSE2UniformConstCostTable[] = {
375 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
376 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
377 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
378
379 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
380 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
381 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
382
383 { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
384 { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
385 { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence
386 { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence
387 { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
388 { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
389 { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence
390 { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence
391 };
392
393 // XOP has faster vXi8 shifts.
394 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
395 ST->hasSSE2() && !ST->hasXOP()) {
396 if (const auto *Entry =
397 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
398 return LT.first * Entry->Cost;
399 }
400
401 static const CostTblEntry AVX512BWConstCostTable[] = {
402 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
403 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
404 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
405 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
406 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
407 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence
408 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
409 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence
410 };
411
412 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
413 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
414 ST->hasBWI()) {
415 if (const auto *Entry =
416 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
417 return LT.first * Entry->Cost;
418 }
419
420 static const CostTblEntry AVX512ConstCostTable[] = {
421 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
422 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
423 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
424 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
425 { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
426 { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
427 { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
428 { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
429 { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
430 { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
431 { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
432 { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
433 };
434
435 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
436 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
437 ST->hasAVX512()) {
438 if (const auto *Entry =
439 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
440 return LT.first * Entry->Cost;
441 }
442
443 static const CostTblEntry AVX2ConstCostTable[] = {
444 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
445 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
446 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
447 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
448 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
449 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence
450 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
451 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence
452 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
453 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence
454 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
455 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence
456 };
457
458 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
459 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
460 ST->hasAVX2()) {
461 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
462 return LT.first * Entry->Cost;
463 }
464
465 static const CostTblEntry SSE2ConstCostTable[] = {
466 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
467 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
468 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
469 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
470 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
471 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
472 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
473 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
474 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
475 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
476 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
477 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence
478 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
479 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
480 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
481 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence
482 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split.
483 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split.
484 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
485 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence
486 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split.
487 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split.
488 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
489 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence
490 };
491
492 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
493 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
494 ST->hasSSE2()) {
495 // pmuldq sequence.
496 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
497 return LT.first * 32;
498 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
499 return LT.first * 38;
500 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
501 return LT.first * 15;
502 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
503 return LT.first * 20;
504
505 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
506 return LT.first * Entry->Cost;
507 }
508
509 static const CostTblEntry AVX512BWShiftCostTable[] = {
510 { ISD::SHL, MVT::v16i8, 4 }, // extend/vpsllvw/pack sequence.
511 { ISD::SRL, MVT::v16i8, 4 }, // extend/vpsrlvw/pack sequence.
512 { ISD::SRA, MVT::v16i8, 4 }, // extend/vpsravw/pack sequence.
513 { ISD::SHL, MVT::v32i8, 4 }, // extend/vpsllvw/pack sequence.
514 { ISD::SRL, MVT::v32i8, 4 }, // extend/vpsrlvw/pack sequence.
515 { ISD::SRA, MVT::v32i8, 6 }, // extend/vpsravw/pack sequence.
516 { ISD::SHL, MVT::v64i8, 6 }, // extend/vpsllvw/pack sequence.
517 { ISD::SRL, MVT::v64i8, 7 }, // extend/vpsrlvw/pack sequence.
518 { ISD::SRA, MVT::v64i8, 15 }, // extend/vpsravw/pack sequence.
519
520 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
521 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
522 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw
523 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
524 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
525 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw
526 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
527 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
528 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw
529 };
530
531 if (ST->hasBWI())
532 if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
533 return LT.first * Entry->Cost;
534
535 static const CostTblEntry AVX2UniformCostTable[] = {
536 // Uniform splats are cheaper for the following instructions.
537 { ISD::SHL, MVT::v16i16, 1 }, // psllw.
538 { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
539 { ISD::SRA, MVT::v16i16, 1 }, // psraw.
540 { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
541 { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
542 { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.
543
544 { ISD::SHL, MVT::v8i32, 1 }, // pslld
545 { ISD::SRL, MVT::v8i32, 1 }, // psrld
546 { ISD::SRA, MVT::v8i32, 1 }, // psrad
547 { ISD::SHL, MVT::v4i64, 1 }, // psllq
548 { ISD::SRL, MVT::v4i64, 1 }, // psrlq
549 };
550
551 if (ST->hasAVX2() &&
552 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
553 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
554 if (const auto *Entry =
555 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
556 return LT.first * Entry->Cost;
557 }
558
559 static const CostTblEntry SSE2UniformCostTable[] = {
560 // Uniform splats are cheaper for the following instructions.
561 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
562 { ISD::SHL, MVT::v4i32, 1 }, // pslld
563 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
564
565 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
566 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
567 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
568
569 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
570 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
571 };
572
573 if (ST->hasSSE2() &&
574 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
575 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
576 if (const auto *Entry =
577 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
578 return LT.first * Entry->Cost;
579 }
580
581 static const CostTblEntry AVX512DQCostTable[] = {
582 { ISD::MUL, MVT::v2i64, 2 }, // pmullq
583 { ISD::MUL, MVT::v4i64, 2 }, // pmullq
584 { ISD::MUL, MVT::v8i64, 2 } // pmullq
585 };
586
587 // Look for AVX512DQ lowering tricks for custom cases.
588 if (ST->hasDQI())
589 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
590 return LT.first * Entry->Cost;
591
592 static const CostTblEntry AVX512BWCostTable[] = {
593 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
594 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
595 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
596 };
597
598 // Look for AVX512BW lowering tricks for custom cases.
599 if (ST->hasBWI())
600 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
601 return LT.first * Entry->Cost;
602
603 static const CostTblEntry AVX512CostTable[] = {
604 { ISD::SHL, MVT::v4i32, 1 },
605 { ISD::SRL, MVT::v4i32, 1 },
606 { ISD::SRA, MVT::v4i32, 1 },
607 { ISD::SHL, MVT::v8i32, 1 },
608 { ISD::SRL, MVT::v8i32, 1 },
609 { ISD::SRA, MVT::v8i32, 1 },
610 { ISD::SHL, MVT::v16i32, 1 },
611 { ISD::SRL, MVT::v16i32, 1 },
612 { ISD::SRA, MVT::v16i32, 1 },
613
614 { ISD::SHL, MVT::v2i64, 1 },
615 { ISD::SRL, MVT::v2i64, 1 },
616 { ISD::SHL, MVT::v4i64, 1 },
617 { ISD::SRL, MVT::v4i64, 1 },
618 { ISD::SHL, MVT::v8i64, 1 },
619 { ISD::SRL, MVT::v8i64, 1 },
620
621 { ISD::SRA, MVT::v2i64, 1 },
622 { ISD::SRA, MVT::v4i64, 1 },
623 { ISD::SRA, MVT::v8i64, 1 },
624
625 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org)
626 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org)
627 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org)
628 { ISD::MUL, MVT::v8i64, 6 }, // 3*pmuludq/3*shift/2*add
629
630 { ISD::FNEG, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
631 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
632 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
633 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
634 { ISD::FDIV, MVT::f64, 4 }, // Skylake from http://www.agner.org/
635 { ISD::FDIV, MVT::v2f64, 4 }, // Skylake from http://www.agner.org/
636 { ISD::FDIV, MVT::v4f64, 8 }, // Skylake from http://www.agner.org/
637 { ISD::FDIV, MVT::v8f64, 16 }, // Skylake from http://www.agner.org/
638
639 { ISD::FNEG, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
640 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
641 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
642 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
643 { ISD::FDIV, MVT::f32, 3 }, // Skylake from http://www.agner.org/
644 { ISD::FDIV, MVT::v4f32, 3 }, // Skylake from http://www.agner.org/
645 { ISD::FDIV, MVT::v8f32, 5 }, // Skylake from http://www.agner.org/
646 { ISD::FDIV, MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
647 };
648
649 if (ST->hasAVX512())
650 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
651 return LT.first * Entry->Cost;
652
653 static const CostTblEntry AVX2ShiftCostTable[] = {
654    // Shifts on vXi64/vXi32 on AVX2 are legal even though we mark them as
655    // custom in order to detect the cases where the shift amount is a scalar.
656 { ISD::SHL, MVT::v4i32, 2 }, // vpsllvd (Haswell from agner.org)
657 { ISD::SRL, MVT::v4i32, 2 }, // vpsrlvd (Haswell from agner.org)
658 { ISD::SRA, MVT::v4i32, 2 }, // vpsravd (Haswell from agner.org)
659 { ISD::SHL, MVT::v8i32, 2 }, // vpsllvd (Haswell from agner.org)
660 { ISD::SRL, MVT::v8i32, 2 }, // vpsrlvd (Haswell from agner.org)
661 { ISD::SRA, MVT::v8i32, 2 }, // vpsravd (Haswell from agner.org)
662 { ISD::SHL, MVT::v2i64, 1 }, // vpsllvq (Haswell from agner.org)
663 { ISD::SRL, MVT::v2i64, 1 }, // vpsrlvq (Haswell from agner.org)
664 { ISD::SHL, MVT::v4i64, 1 }, // vpsllvq (Haswell from agner.org)
665 { ISD::SRL, MVT::v4i64, 1 }, // vpsrlvq (Haswell from agner.org)
666 };
667
668 if (ST->hasAVX512()) {
669 if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
670 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
671 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
672 // On AVX512, a packed v32i16 shift left by a constant build_vector
673 // is lowered into a vector multiply (vpmullw).
674 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
675 Op1Info, Op2Info,
676 TargetTransformInfo::OP_None,
677 TargetTransformInfo::OP_None);
678 }
679
680 // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
681 if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
682 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
683 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
684 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
685 // On AVX2, a packed v16i16 shift left by a constant build_vector
686 // is lowered into a vector multiply (vpmullw).
687 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
688 Op1Info, Op2Info,
689 TargetTransformInfo::OP_None,
690 TargetTransformInfo::OP_None);
691
692 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
693 return LT.first * Entry->Cost;
694 }
695
696 static const CostTblEntry XOPShiftCostTable[] = {
697 // 128bit shifts take 1cy, but right shifts require negation beforehand.
698 { ISD::SHL, MVT::v16i8, 1 },
699 { ISD::SRL, MVT::v16i8, 2 },
700 { ISD::SRA, MVT::v16i8, 2 },
701 { ISD::SHL, MVT::v8i16, 1 },
702 { ISD::SRL, MVT::v8i16, 2 },
703 { ISD::SRA, MVT::v8i16, 2 },
704 { ISD::SHL, MVT::v4i32, 1 },
705 { ISD::SRL, MVT::v4i32, 2 },
706 { ISD::SRA, MVT::v4i32, 2 },
707 { ISD::SHL, MVT::v2i64, 1 },
708 { ISD::SRL, MVT::v2i64, 2 },
709 { ISD::SRA, MVT::v2i64, 2 },
710 // 256bit shifts require splitting if AVX2 didn't catch them above.
711 { ISD::SHL, MVT::v32i8, 2+2 },
712 { ISD::SRL, MVT::v32i8, 4+2 },
713 { ISD::SRA, MVT::v32i8, 4+2 },
714 { ISD::SHL, MVT::v16i16, 2+2 },
715 { ISD::SRL, MVT::v16i16, 4+2 },
716 { ISD::SRA, MVT::v16i16, 4+2 },
717 { ISD::SHL, MVT::v8i32, 2+2 },
718 { ISD::SRL, MVT::v8i32, 4+2 },
719 { ISD::SRA, MVT::v8i32, 4+2 },
720 { ISD::SHL, MVT::v4i64, 2+2 },
721 { ISD::SRL, MVT::v4i64, 4+2 },
722 { ISD::SRA, MVT::v4i64, 4+2 },
723 };
724
725 // Look for XOP lowering tricks.
726 if (ST->hasXOP()) {
727 // If the right shift is constant then we'll fold the negation so
728 // it's as cheap as a left shift.
729 int ShiftISD = ISD;
730 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
731 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
732 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
733 ShiftISD = ISD::SHL;
734 if (const auto *Entry =
735 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
736 return LT.first * Entry->Cost;
737 }
738
739 static const CostTblEntry SSE2UniformShiftCostTable[] = {
740 // Uniform splats are cheaper for the following instructions.
741 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
742 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split.
743 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split.
744
745 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
746 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split.
747 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split.
748
749 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
750 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split.
751 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle.
752 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split.
753 };
754
755 if (ST->hasSSE2() &&
756 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
757 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
758
759    // Handle AVX2 uniform v4i64 ISD::SRA; it's not worth a table.
760 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
761 return LT.first * 4; // 2*psrad + shuffle.
762
763 if (const auto *Entry =
764 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
765 return LT.first * Entry->Cost;
766 }
767
768 if (ISD == ISD::SHL &&
769 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
770 MVT VT = LT.second;
771    // A vector shift left by a non-uniform constant can be lowered
772    // into a vector multiply.
773 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
774 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
775 ISD = ISD::MUL;
776 }
777
778 static const CostTblEntry AVX2CostTable[] = {
779 { ISD::SHL, MVT::v16i8, 6 }, // vpblendvb sequence.
780 { ISD::SHL, MVT::v32i8, 6 }, // vpblendvb sequence.
781 { ISD::SHL, MVT::v64i8, 12 }, // 2*vpblendvb sequence.
782 { ISD::SHL, MVT::v8i16, 5 }, // extend/vpsrlvd/pack sequence.
783 { ISD::SHL, MVT::v16i16, 7 }, // extend/vpsrlvd/pack sequence.
784 { ISD::SHL, MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.
785
786 { ISD::SRL, MVT::v16i8, 6 }, // vpblendvb sequence.
787 { ISD::SRL, MVT::v32i8, 6 }, // vpblendvb sequence.
788 { ISD::SRL, MVT::v64i8, 12 }, // 2*vpblendvb sequence.
789 { ISD::SRL, MVT::v8i16, 5 }, // extend/vpsrlvd/pack sequence.
790 { ISD::SRL, MVT::v16i16, 7 }, // extend/vpsrlvd/pack sequence.
791 { ISD::SRL, MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.
792
793 { ISD::SRA, MVT::v16i8, 17 }, // vpblendvb sequence.
794 { ISD::SRA, MVT::v32i8, 17 }, // vpblendvb sequence.
795 { ISD::SRA, MVT::v64i8, 34 }, // 2*vpblendvb sequence.
796 { ISD::SRA, MVT::v8i16, 5 }, // extend/vpsravd/pack sequence.
797 { ISD::SRA, MVT::v16i16, 7 }, // extend/vpsravd/pack sequence.
798 { ISD::SRA, MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
799 { ISD::SRA, MVT::v2i64, 2 }, // srl/xor/sub sequence.
800 { ISD::SRA, MVT::v4i64, 2 }, // srl/xor/sub sequence.
801
802 { ISD::SUB, MVT::v32i8, 1 }, // psubb
803 { ISD::ADD, MVT::v32i8, 1 }, // paddb
804 { ISD::SUB, MVT::v16i16, 1 }, // psubw
805 { ISD::ADD, MVT::v16i16, 1 }, // paddw
806 { ISD::SUB, MVT::v8i32, 1 }, // psubd
807 { ISD::ADD, MVT::v8i32, 1 }, // paddd
808 { ISD::SUB, MVT::v4i64, 1 }, // psubq
809 { ISD::ADD, MVT::v4i64, 1 }, // paddq
810
811 { ISD::MUL, MVT::v16i16, 1 }, // pmullw
812 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org)
813 { ISD::MUL, MVT::v4i64, 6 }, // 3*pmuludq/3*shift/2*add
814
815 { ISD::FNEG, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
816 { ISD::FNEG, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
817 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
818 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
819 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
820 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
821 { ISD::FMUL, MVT::f64, 1 }, // Haswell from http://www.agner.org/
822 { ISD::FMUL, MVT::v2f64, 1 }, // Haswell from http://www.agner.org/
823 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
824 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
825
826 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
827 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
828 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
829 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
830 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
831 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
832 };
833
834 // Look for AVX2 lowering tricks for custom cases.
835 if (ST->hasAVX2())
836 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
837 return LT.first * Entry->Cost;
838
839 static const CostTblEntry AVX1CostTable[] = {
840 // We don't have to scalarize unsupported ops. We can issue two half-sized
841 // operations and we only need to extract the upper YMM half.
842 // Two ops + 1 extract + 1 insert = 4.
843 { ISD::MUL, MVT::v16i16, 4 },
844 { ISD::MUL, MVT::v8i32, 5 }, // BTVER2 from http://www.agner.org/
845 { ISD::MUL, MVT::v4i64, 12 },
846
847 { ISD::SUB, MVT::v32i8, 4 },
848 { ISD::ADD, MVT::v32i8, 4 },
849 { ISD::SUB, MVT::v16i16, 4 },
850 { ISD::ADD, MVT::v16i16, 4 },
851 { ISD::SUB, MVT::v8i32, 4 },
852 { ISD::ADD, MVT::v8i32, 4 },
853 { ISD::SUB, MVT::v4i64, 4 },
854 { ISD::ADD, MVT::v4i64, 4 },
855
856 { ISD::SHL, MVT::v32i8, 22 }, // pblendvb sequence + split.
857 { ISD::SHL, MVT::v8i16, 6 }, // pblendvb sequence.
858 { ISD::SHL, MVT::v16i16, 13 }, // pblendvb sequence + split.
859 { ISD::SHL, MVT::v4i32, 3 }, // pslld/paddd/cvttps2dq/pmulld
860 { ISD::SHL, MVT::v8i32, 9 }, // pslld/paddd/cvttps2dq/pmulld + split
861 { ISD::SHL, MVT::v2i64, 2 }, // Shift each lane + blend.
862 { ISD::SHL, MVT::v4i64, 6 }, // Shift each lane + blend + split.
863
864 { ISD::SRL, MVT::v32i8, 23 }, // pblendvb sequence + split.
865 { ISD::SRL, MVT::v16i16, 28 }, // pblendvb sequence + split.
866 { ISD::SRL, MVT::v4i32, 6 }, // Shift each lane + blend.
867 { ISD::SRL, MVT::v8i32, 14 }, // Shift each lane + blend + split.
868 { ISD::SRL, MVT::v2i64, 2 }, // Shift each lane + blend.
869 { ISD::SRL, MVT::v4i64, 6 }, // Shift each lane + blend + split.
870
871 { ISD::SRA, MVT::v32i8, 44 }, // pblendvb sequence + split.
872 { ISD::SRA, MVT::v16i16, 28 }, // pblendvb sequence + split.
873 { ISD::SRA, MVT::v4i32, 6 }, // Shift each lane + blend.
874 { ISD::SRA, MVT::v8i32, 14 }, // Shift each lane + blend + split.
875 { ISD::SRA, MVT::v2i64, 5 }, // Shift each lane + blend.
876 { ISD::SRA, MVT::v4i64, 12 }, // Shift each lane + blend + split.
877
878 { ISD::FNEG, MVT::v4f64, 2 }, // BTVER2 from http://www.agner.org/
879 { ISD::FNEG, MVT::v8f32, 2 }, // BTVER2 from http://www.agner.org/
880
881 { ISD::FMUL, MVT::f64, 2 }, // BTVER2 from http://www.agner.org/
882 { ISD::FMUL, MVT::v2f64, 2 }, // BTVER2 from http://www.agner.org/
883 { ISD::FMUL, MVT::v4f64, 4 }, // BTVER2 from http://www.agner.org/
884
885 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
886 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
887 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
888 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
889 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
890 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/
891 };
892
893 if (ST->hasAVX())
894 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
895 return LT.first * Entry->Cost;
896
897 static const CostTblEntry SSE42CostTable[] = {
898 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
899 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
900 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
901 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
902
903 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
904 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/
905 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
906 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
907
908 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
909 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
910 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
911 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
912
913 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
914 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
915 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
916 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
917
918 { ISD::MUL, MVT::v2i64, 6 } // 3*pmuludq/3*shift/2*add
919 };
920
921 if (ST->hasSSE42())
922 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
923 return LT.first * Entry->Cost;
924
925 static const CostTblEntry SSE41CostTable[] = {
926 { ISD::SHL, MVT::v16i8, 10 }, // pblendvb sequence.
927 { ISD::SHL, MVT::v8i16, 11 }, // pblendvb sequence.
928 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
929
930 { ISD::SRL, MVT::v16i8, 11 }, // pblendvb sequence.
931 { ISD::SRL, MVT::v8i16, 13 }, // pblendvb sequence.
932 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
933
934 { ISD::SRA, MVT::v16i8, 21 }, // pblendvb sequence.
935 { ISD::SRA, MVT::v8i16, 13 }, // pblendvb sequence.
936
937 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org)
938 };
939
940 if (ST->hasSSE41())
941 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
942 return LT.first * Entry->Cost;
943
944 static const CostTblEntry SSE2CostTable[] = {
945 // We don't correctly identify costs of casts because they are marked as
946 // custom.
947 { ISD::SHL, MVT::v16i8, 13 }, // cmpgtb sequence.
948 { ISD::SHL, MVT::v8i16, 25 }, // cmpgtw sequence.
949 { ISD::SHL, MVT::v4i32, 16 }, // pslld/paddd/cvttps2dq/pmuludq.
950 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
951
952 { ISD::SRL, MVT::v16i8, 14 }, // cmpgtb sequence.
953 { ISD::SRL, MVT::v8i16, 16 }, // cmpgtw sequence.
954 { ISD::SRL, MVT::v4i32, 12 }, // Shift each lane + blend.
955 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
956
957 { ISD::SRA, MVT::v16i8, 27 }, // unpacked cmpgtb sequence.
958 { ISD::SRA, MVT::v8i16, 16 }, // cmpgtw sequence.
959 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
960 { ISD::SRA, MVT::v2i64, 8 }, // srl/xor/sub splat+shuffle sequence.
961
962 { ISD::MUL, MVT::v8i16, 1 }, // pmullw
963 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
964 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add
965
966 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
967 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
968 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
969 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
970
971 { ISD::FNEG, MVT::f32, 1 }, // Pentium IV from http://www.agner.org/
972 { ISD::FNEG, MVT::f64, 1 }, // Pentium IV from http://www.agner.org/
973 { ISD::FNEG, MVT::v4f32, 1 }, // Pentium IV from http://www.agner.org/
974 { ISD::FNEG, MVT::v2f64, 1 }, // Pentium IV from http://www.agner.org/
975
976 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
977 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
978
979 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
980 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
981 };
982
983 if (ST->hasSSE2())
984 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
985 return LT.first * Entry->Cost;
986
987 static const CostTblEntry SSE1CostTable[] = {
988 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
989 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
990
991 { ISD::FNEG, MVT::f32, 2 }, // Pentium III from http://www.agner.org/
992 { ISD::FNEG, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
993
994 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
995 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
996
997 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
998 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
999 };
1000
1001 if (ST->hasSSE1())
1002 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1003 return LT.first * Entry->Cost;
1004
1005 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1006 { ISD::ADD, MVT::i64, 1 }, // Core (Merom) from http://www.agner.org/
1007 { ISD::SUB, MVT::i64, 1 }, // Core (Merom) from http://www.agner.org/
1008 };
1009
1010 if (ST->is64Bit())
1011 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1012 return LT.first * Entry->Cost;
1013
1014 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1015 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
1016 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
1017 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
1018
1019 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
1020 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
1021 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
1022 };
1023
1024 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1025 return LT.first * Entry->Cost;
1026
1027  // It is not a good idea to vectorize division. We have to scalarize it and
1028  // in the process we will often end up having to spill regular
1029  // registers. The overhead of division is going to dominate most kernels
1030  // anyway, so try hard to prevent vectorization of division - it is
1031  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1032  // to hide "20 cycles" for each lane.
1033 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
1034 ISD == ISD::UDIV || ISD == ISD::UREM)) {
1035 InstructionCost ScalarCost = getArithmeticInstrCost(
1036 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
1037 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1038 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1039 }
1040
1041 // Fallback to the default implementation.
1042 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
1043}
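
As a usage illustration, a client would typically reach this hook through the public TargetTransformInfo wrapper. The sketch below (a hypothetical helper, not part of this file) asks for the reciprocal-throughput cost of a <4 x i32> sdiv, which exercises the constant-divisor tables and the scalarization fallback above.

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/LLVMContext.h"

  // Illustrative only: query the throughput cost of a v4i32 signed division.
  static llvm::InstructionCost
  costOfV4I32SDiv(const llvm::TargetTransformInfo &TTI, llvm::LLVMContext &Ctx) {
    auto *VecTy = llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 4);
    return TTI.getArithmeticInstrCost(
        llvm::Instruction::SDiv, VecTy,
        llvm::TargetTransformInfo::TCK_RecipThroughput);
  }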
1044
1045InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1046 VectorType *BaseTp,
1047 ArrayRef<int> Mask, int Index,
1048 VectorType *SubTp) {
1049 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1050 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1051 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1052
1053 Kind = improveShuffleKindFromMask(Kind, Mask);
1054 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1055 if (Kind == TTI::SK_Transpose)
1056 Kind = TTI::SK_PermuteTwoSrc;
1057
1058 // For Broadcasts we are splatting the first element from the first input
1059  // register, so we only need to reference that input and all the output
1060 // registers are the same.
1061 if (Kind == TTI::SK_Broadcast)
1062 LT.first = 1;
1063
1064 // Subvector extractions are free if they start at the beginning of a
1065 // vector and cheap if the subvectors are aligned.
1066 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1067 int NumElts = LT.second.getVectorNumElements();
1068 if ((Index % NumElts) == 0)
1069 return 0;
1070 std::pair<InstructionCost, MVT> SubLT =
1071 TLI->getTypeLegalizationCost(DL, SubTp);
1072 if (SubLT.second.isVector()) {
1073 int NumSubElts = SubLT.second.getVectorNumElements();
1074 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1075 return SubLT.first;
1076 // Handle some cases for widening legalization. For now we only handle
1077 // cases where the original subvector was naturally aligned and evenly
1078 // fit in its legalized subvector type.
1079 // FIXME: Remove some of the alignment restrictions.
1080 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1081 // vectors.
1082 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1083 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1084 (NumSubElts % OrigSubElts) == 0 &&
1085 LT.second.getVectorElementType() ==
1086 SubLT.second.getVectorElementType() &&
1087 LT.second.getVectorElementType().getSizeInBits() ==
1088 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1089        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1090               "Unexpected number of elements!");
1091 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1092 LT.second.getVectorNumElements());
1093 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1094 SubLT.second.getVectorNumElements());
1095 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1096 InstructionCost ExtractCost = getShuffleCost(
1097 TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1098
1099 // If the original size is 32-bits or more, we can use pshufd. Otherwise
1100 // if we have SSSE3 we can use pshufb.
1101 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1102 return ExtractCost + 1; // pshufd or pshufb
1103
1104        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1105               "Unexpected vector size");
1106
1107 return ExtractCost + 2; // worst case pshufhw + pshufd
1108 }
1109 }
1110 }
1111
1112 // Subvector insertions are cheap if the subvectors are aligned.
1113 // Note that in general, the insertion starting at the beginning of a vector
1114 // isn't free, because we need to preserve the rest of the wide vector.
1115 if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1116 int NumElts = LT.second.getVectorNumElements();
1117 std::pair<InstructionCost, MVT> SubLT =
1118 TLI->getTypeLegalizationCost(DL, SubTp);
1119 if (SubLT.second.isVector()) {
1120 int NumSubElts = SubLT.second.getVectorNumElements();
1121 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1122 return SubLT.first;
1123 }
1124
1125 // If the insertion isn't aligned, treat it like a 2-op shuffle.
1126 Kind = TTI::SK_PermuteTwoSrc;
1127 }
1128
1129 // Handle some common (illegal) sub-vector types as they are often very cheap
1130 // to shuffle even on targets without PSHUFB.
1131 EVT VT = TLI->getValueType(DL, BaseTp);
1132 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1133 !ST->hasSSSE3()) {
1134 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1135 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1136 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1137 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1138 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1139 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1140
1141 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1142 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1143 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1144 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1145
1146 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1147 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1148 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1149 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1150 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1151
1152 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1153 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1154 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1155 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1156 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1157 };
1158
1159 if (ST->hasSSE2())
1160 if (const auto *Entry =
1161 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1162 return Entry->Cost;
1163 }
1164
1165 // We are going to permute multiple sources and the result will be in multiple
1166  // destinations. We provide an accurate cost only for splits where the
1167  // element type remains the same.
1168 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1169 MVT LegalVT = LT.second;
1170 if (LegalVT.isVector() &&
1171 LegalVT.getVectorElementType().getSizeInBits() ==
1172 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1173 LegalVT.getVectorNumElements() <
1174 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1175
1176 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1177 unsigned LegalVTSize = LegalVT.getStoreSize();
1178 // Number of source vectors after legalization:
1179 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1180 // Number of destination vectors after legalization:
1181 InstructionCost NumOfDests = LT.first;
1182
1183 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1184 LegalVT.getVectorNumElements());
1185
1186 InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1187 return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1188 None, 0, nullptr);
1189 }
1190
1191 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1192 }
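// Worked example (assumed scenario): a single-source permute of v32i8 on an
// SSE2-only target legalizes to LegalVT = v16i8, so NumOfSrcs = 2 and
// NumOfDests = 2, giving NumOfShuffles = (2 - 1) * 2 = 2; the returned cost is
// 2 * getShuffleCost(SK_PermuteTwoSrc, v16i8), i.e. 2 * 13 = 26 with the SSE2
// shuffle table further down.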
1193
1194 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1195 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1196 // We assume that source and destination have the same vector type.
1197 InstructionCost NumOfDests = LT.first;
1198 InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1199 LT.first = NumOfDests * NumOfShufflesPerDest;
1200 }
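// Worked example (assumed scenario): a two-source permute of v32i8 that
// legalizes into two v16i8 halves has LT.first = 2, so NumOfDests = 2 and
// NumOfShufflesPerDest = 2 * 2 - 1 = 3; LT.first becomes 6 and scales every
// per-type table cost looked up below.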
1201
1202 static const CostTblEntry AVX512FP16ShuffleTbl[] = {
1203 {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1204 {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1205 {TTI::SK_Broadcast, MVT::v8f16, 1}, // vpbroadcastw
1206
1207 {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1208 {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
1209 {TTI::SK_Reverse, MVT::v8f16, 1}, // vpshufb
1210
1211 {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1212 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1213 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1}, // vpshufb
1214
1215 {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1216 {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
1217 {TTI::SK_PermuteTwoSrc, MVT::v8f16, 2} // vpermt2w
1218 };
1219
1220 if (!ST->useSoftFloat() && ST->hasFP16())
1221 if (const auto *Entry =
1222 CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
1223 return LT.first * Entry->Cost;
1224
1225 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1226 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1227 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1228
1229 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1230 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1231
1232 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1233 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1234 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1235 };
1236
1237 if (ST->hasVBMI())
1238 if (const auto *Entry =
1239 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1240 return LT.first * Entry->Cost;
1241
1242 static const CostTblEntry AVX512BWShuffleTbl[] = {
1243 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1244 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1245
1246 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1247 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1248 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1249
1250 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1251 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1252 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1253
1254 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1255 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1256 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1257 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1258
1259 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1260 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1261 };
1262
1263 if (ST->hasBWI())
1264 if (const auto *Entry =
1265 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1266 return LT.first * Entry->Cost;
1267
1268 static const CostTblEntry AVX512ShuffleTbl[] = {
1269 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1270 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1271 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1272 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1273 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1274 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1275
1276 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1277 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1278 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1279 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1280 {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1281 {TTI::SK_Reverse, MVT::v64i8, 7}, // per mca
1282
1283 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1284 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1285 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1286 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1287 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1288 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1289 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1290 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1291 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1292 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1293 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1294 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1295 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1296
1297 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1298 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1299 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1300 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1301 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1302 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1303 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1304 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1305 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1306 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1307 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1308 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1309
1310 // FIXME: This just applies the type legalization cost rules above
1311 // assuming these completely split.
1312 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1313 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1314 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1315 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1316
1317 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1318 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq
1319 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd
1320 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1321 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq
1322 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1323 };
1324
1325 if (ST->hasAVX512())
1326 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1327 return LT.first * Entry->Cost;
1328
1329 static const CostTblEntry AVX2ShuffleTbl[] = {
1330 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1331 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1332 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1333 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1334 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1335 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1336
1337 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1338 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1339 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1340 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1341 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1342 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1343
1344 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1345 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1346
1347 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1348 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1349 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1350 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1351 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1352 // + vpblendvb
1353 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1354 // + vpblendvb
1355
1356 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1357 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1358 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1359 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1360 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1361 // + vpblendvb
1362 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1363 // + vpblendvb
1364 };
1365
1366 if (ST->hasAVX2())
1367 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1368 return LT.first * Entry->Cost;
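// Illustrative example (assumed target/type): reversing a v16i32 with only
// AVX2 available legalizes to two v8i32 halves (LT.first = 2), and the lookup
// above yields 2 * 1 = 2 from the v8i32 vpermd entry.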
1369
1370 static const CostTblEntry XOPShuffleTbl[] = {
1371 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1372 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1373 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1374 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1375 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1376 // + vinsertf128
1377 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1378 // + vinsertf128
1379
1380 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1381 // + vinsertf128
1382 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1383 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1384 // + vinsertf128
1385 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1386 };
1387
1388 if (ST->hasXOP())
1389 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1390 return LT.first * Entry->Cost;
1391
1392 static const CostTblEntry AVX1ShuffleTbl[] = {
1393 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1394 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1395 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1396 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1397 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1398 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1399
1400 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1401 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1402 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1403 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1404 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1405 // + vinsertf128
1406 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1407 // + vinsertf128
1408
1409 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1410 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1411 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1412 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1413 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1414 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1415
1416 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1417 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1418 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1419 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1420 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1421 // + 2*por + vinsertf128
1422 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1423 // + 2*por + vinsertf128
1424
1425 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1426 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1427 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1428 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1429 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1430 // + 4*por + vinsertf128
1431 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1432 // + 4*por + vinsertf128
1433 };
1434
1435 if (ST->hasAVX())
1436 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1437 return LT.first * Entry->Cost;
1438
1439 static const CostTblEntry SSE41ShuffleTbl[] = {
1440 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1441 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1442 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1443 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1444 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1445 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1446 };
1447
1448 if (ST->hasSSE41())
1449 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1450 return LT.first * Entry->Cost;
1451
1452 static const CostTblEntry SSSE3ShuffleTbl[] = {
1453 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1454 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1455
1456 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1457 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1458
1459 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1460 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1461
1462 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1463 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1464
1465 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1466 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1467 };
1468
1469 if (ST->hasSSSE3())
1470 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1471 return LT.first * Entry->Cost;
1472
1473 static const CostTblEntry SSE2ShuffleTbl[] = {
1474 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1475 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1476 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1477 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1478 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1479
1480 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1481 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1482 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1483 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1484 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1485 // + 2*pshufd + 2*unpck + packus
1486
1487 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1488 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1489 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1490 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1491 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1492
1493 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1494 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1495 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1496 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1497 // + pshufd/unpck
1498 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1499 // + 2*pshufd + 2*unpck + 2*packus
1500
1501 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
1502 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
1503 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
1504 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
1505 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
1506 };
1507
1508 if (ST->hasSSE2())
1509 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1510 return LT.first * Entry->Cost;
1511
1512 static const CostTblEntry SSE1ShuffleTbl[] = {
1513 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
1514 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
1515 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
1516 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1517 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
1518 };
1519
1520 if (ST->hasSSE1())
1521 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1522 return LT.first * Entry->Cost;
1523
1524 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1525}
1526
1527InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1528 Type *Src,
1529 TTI::CastContextHint CCH,
1530 TTI::TargetCostKind CostKind,
1531 const Instruction *I) {
1532 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1533 assert(ISD && "Invalid opcode");
1534
1535 // TODO: Allow non-throughput costs that aren't binary.
1536 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1537 if (CostKind != TTI::TCK_RecipThroughput)
1538 return Cost == 0 ? 0 : 1;
1539 return Cost;
1540 };
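// For example (illustrative): with CostKind == TCK_CodeSize, AdjustCost(3)
// returns 1 and AdjustCost(0) returns 0, while with TCK_RecipThroughput the
// table cost of 3 is returned unchanged.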
1541
1542 // The cost tables include both specific, custom (non-legal) src/dst type
1543 // conversions and generic, legalized types. We test for customs first, before
1544 // falling back to legalization.
1545 // FIXME: Need a better design of the cost table to handle non-simple types of
1546 // potential massive combinations (elem_num x src_type x dst_type).
1547 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1548 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1549 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1550
1551 // Mask sign extend has an instruction.
1552 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1553 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1554 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1555 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1556 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1557 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1558 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1559 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1560 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1561 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1562 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
1563
1564 // Mask zero extend is a sext + shift.
1565 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1566 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1567 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1568 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1569 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1570 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1571 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1572 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1573 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1574 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1575 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
1576
1577 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
1578 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
1579 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
1580 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
1581 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb
1582 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
1583 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
1584 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb
1585 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
1586 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
1587 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb
1588 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
1589 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
1590 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
1591 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
1592 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
1593 };
1594
1595 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1596 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1597 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1598
1599 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1600 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1601
1602 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
1603 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
1604
1605 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
1606 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
1607 };
1608
1609 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1610 // 256-bit wide vectors.
1611
1612 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1613 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
1614 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
1615 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
1616
1617 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1618 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1619 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1620 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
1621 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1622 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1623 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1624 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1625 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
1626 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
1627 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
1628 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
1629 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
1630 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
1631 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
1632 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb
1633 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb
1634 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb
1635 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdb
1636 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb
1637 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb
1638 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb
1639 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw
1640 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd
1641 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
1642 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1643
1644 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
1645 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
1646
1647 // Sign extend is zmm vpternlogd+vptruncdb.
1648 // Zero extend is zmm broadcast load+vptruncdw.
1649 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
1650 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
1651 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
1652 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
1653 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
1654 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
1655 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
1656 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
1657
1658 // Sign extend is zmm vpternlogd+vptruncdw.
1659 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1660 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
1661 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1662 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
1663 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1664 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
1665 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1666 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
1667 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1668
1669 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
1670 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
1671 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
1672 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
1673 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
1674 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
1675 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
1676 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
1677 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
1678 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
1679
1680 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
1681 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
1682 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
1683 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
1684
1685 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1686 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1687 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1688 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1689 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1690 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1691 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1692 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1693 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1694 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1695
1696 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1697 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1698
1699 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1700 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1701 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
1702 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
1703 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1704 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
1705 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1706 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1707
1708 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1709 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1710 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
1711 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
1712 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1713 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
1714 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1715 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1716 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
1717 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
1718
1719 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
1720 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
1721 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64,15 },
1722 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32,11 },
1723 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64,31 },
1724 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
1725 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 },
1726 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 },
1727 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64,15 },
1728 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
1729 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },
1730
1731 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1732 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
1733 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
1734 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
1735 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
1736 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
1737 };
1738
1739 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1740 // Mask sign extend has an instruction.
1741 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1742 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1743 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1744 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1745 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1746 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1747 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1748 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1749 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1750
1751 // Mask zero extend is a sext + shift.
1752 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1753 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1754 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1755 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1756 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1757 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1758 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1759 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1760 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1761
1762 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
1763 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
1764 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
1765 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
1766 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
1767 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
1768 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
1769 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
1770 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
1771 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
1772 };
1773
1774 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1775 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1776 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1777 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1778 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1779
1780 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1781 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1782 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1783 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1784
1785 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
1786 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
1787 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1788 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
1789
1790 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
1791 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
1792 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1793 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
1794 };
1795
1796 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1797 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1798 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1799 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1800 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
1801 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1802 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1803 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1804 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
1805 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
1806 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
1807 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
1808 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
1809 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
1810 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
1811 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb
1812 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw
1813 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb
1814
1815 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1816 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1817 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
1818 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
1819 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
1820 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
1821 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
1822 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
1823 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
1824 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
1825
1826 // sign extend is vpcmpeq+maskedmove+vpmovdw
1827 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1828 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1829 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
1830 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1831 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
1832 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1833 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
1834 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1835 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1836
1837 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
1838 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
1839 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
1840 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
1841 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
1842 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
1843 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
1844 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
1845 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
1846 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
1847
1848 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
1849 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
1850 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
1851 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
1852 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1853 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1854 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
1855 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
1856 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1857 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1858 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1859 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1860
1861 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
1862 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
1863 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
1864 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
1865
1866 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
1867 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
1868 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
1869 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
1870 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
1871 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
1872 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
1873 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1874 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1875 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1876 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
1877 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1878 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
1879
1880 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
1881 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
1882 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },
1883
1884 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
1885 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
1886 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1887 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
1888 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
1889 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
1890 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1891 };
1892
1893 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1894 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1895 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1896 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1897 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1898 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1899 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1900
1901 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
1902 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
1903 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
1904 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
1905 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1906 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1907 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
1908 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
1909 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1910 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1911 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1912 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1913 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1914 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1915
1916 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
1917
1918 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
1919 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
1920 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
1921 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
1922 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
1923 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
1924 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
1925 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
1926 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
1927 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
1928
1929 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
1930 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
1931
1932 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
1933 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
1934 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
1935 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },
1936
1937 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
1938 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
1939 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
1940 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
1941 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
1942 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
1943 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
1944 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },
1945
1946 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
1947 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
1948 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
1949 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1950 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1951 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1952 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },
1953
1954 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
1955 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
1956 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
1957 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1958 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
1959 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
1960 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
1961 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
1962 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
1963 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
1964 };
1965
1966 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1967 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1968 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1969 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1970 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1971 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1972 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1973
1974 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
1975 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
1976 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
1977 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
1978 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1979 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1980 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
1981 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
1982 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1983 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1984 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1985 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1986
1987 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
1988 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
1989 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
1990 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
1991 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
1992
1993 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
1994 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
1995 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1996 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
1997 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw
1998 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1999
2000 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
2001 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
2002 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
2003 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2004 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2005 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2006 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2007 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2008 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
2009 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
2010 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
2011 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },
2012
2013 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
2014 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
2015 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
2016 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2017 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2018 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2019 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2020 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
2021 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
2022 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2023 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
2024 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
2025 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
2026 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
2027 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
2028 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
2029 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },
2030
2031 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
2032 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
2033 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
2034 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
2035 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
2036 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
2037 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
2038 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
2039 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
2040 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
2041 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },
2042
2043 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
2044 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
2045 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
2046 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
2047 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
2048 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
2049 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
2050 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
2051 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
2052 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2053 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
2054 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
2055 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },
2056
2057 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
2058 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
2059 };
2060
2061 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2062 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2063 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2064 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2065 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2066 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2067 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2068 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2069 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2070 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2071 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2072 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2073 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2074
2075 // These truncates end up widening elements.
2076 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ
2077 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ
2078 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD
2079
2080 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
2081 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
2082 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },
2083
2084 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
2085 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
2086 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
2087 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
2088 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2089 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2090 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2091 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2092 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2093 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
2094 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2095
2096 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
2097 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
2098 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
2099 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
2100 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2101 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2102 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2103 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2104 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
2105 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2106 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
2107 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
2108 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
2109 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },
2110
2111 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
2112 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
2113 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
2114 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
2115 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
2116 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
2117 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
2118 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
2119 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
2120 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },
2121
2122 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
2123 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2124 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
2125 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
2126 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
2127 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
2128 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
2129 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
2130 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
2131 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2132 };
2133
2134 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2135 // These are somewhat magic numbers justified by comparing the
2136 // output of llvm-mca for our various supported scheduler models
2137 // and basing it off the worst case scenario.
2138 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
2139 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
2140 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
2141 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
2142 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
2143 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2144 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
2145 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2146 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2147 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
2148 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
2149 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },
2150
2151 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
2152 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
2153 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
2154 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
2155 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2156 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
2157 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
2158 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2159 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
2160 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
2161 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2162 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
2163 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },
2164
2165 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
2166 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
2167 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
2168 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
2169 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
2170 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
2171 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
2172 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
2173 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
2174 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },
2175
2176 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
2177 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2178 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
2179 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
2180 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
2181 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
2182 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
2183 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
2184 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
2185 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },
2186
2187 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2188 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2189 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
2190 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
2191 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2192 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
2193 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
2194 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
2195 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2196 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
2197 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2198 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },
2199
2200 // These truncates are really widening elements.
2201 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
2202 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
2203 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
2204 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
2205 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
2206 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
2207
2208 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
2209 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
2210 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB
2211 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
2212 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
2213 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
2214 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2215 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32,10 },
2216 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
2217 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
2218 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD
2219 };
2220
2221 // Attempt to map directly to (simple) MVT types to let us match custom entries.
2222 EVT SrcTy = TLI->getValueType(DL, Src);
2223 EVT DstTy = TLI->getValueType(DL, Dst);
2224
2225 // The function getSimpleVT only handles simple value types.
2226 if (SrcTy.isSimple() && DstTy.isSimple()) {
2227 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2228 MVT SimpleDstTy = DstTy.getSimpleVT();
2229
2230 if (ST->useAVX512Regs()) {
2231 if (ST->hasBWI())
2232 if (const auto *Entry = ConvertCostTableLookup(
2233 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2234 return AdjustCost(Entry->Cost);
2235
2236 if (ST->hasDQI())
2237 if (const auto *Entry = ConvertCostTableLookup(
2238 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2239 return AdjustCost(Entry->Cost);
2240
2241 if (ST->hasAVX512())
2242 if (const auto *Entry = ConvertCostTableLookup(
2243 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2244 return AdjustCost(Entry->Cost);
2245 }
2246
2247 if (ST->hasBWI())
2248 if (const auto *Entry = ConvertCostTableLookup(
2249 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2250 return AdjustCost(Entry->Cost);
2251
2252 if (ST->hasDQI())
2253 if (const auto *Entry = ConvertCostTableLookup(
2254 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2255 return AdjustCost(Entry->Cost);
2256
2257 if (ST->hasAVX512())
2258 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2259 SimpleDstTy, SimpleSrcTy))
2260 return AdjustCost(Entry->Cost);
2261
2262 if (ST->hasAVX2()) {
2263 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2264 SimpleDstTy, SimpleSrcTy))
2265 return AdjustCost(Entry->Cost);
2266 }
2267
2268 if (ST->hasAVX()) {
2269 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2270 SimpleDstTy, SimpleSrcTy))
2271 return AdjustCost(Entry->Cost);
2272 }
2273
2274 if (ST->hasSSE41()) {
2275 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2276 SimpleDstTy, SimpleSrcTy))
2277 return AdjustCost(Entry->Cost);
2278 }
2279
2280 if (ST->hasSSE2()) {
2281 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2282 SimpleDstTy, SimpleSrcTy))
2283 return AdjustCost(Entry->Cost);
2284 }
2285 }
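// Illustrative example (assumed target/types): a sext from v16i8 to v16i32 on
// an AVX-512 target is simple for both types, matches the
// AVX512FConversionTbl entry above, and returns AdjustCost(1).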
2286
2287 // Fall back to legalized types.
2288 std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2289 std::pair<InstructionCost, MVT> LTDest =
2290 TLI->getTypeLegalizationCost(DL, Dst);
2291
2292 if (ST->useAVX512Regs()) {
2293 if (ST->hasBWI())
2294 if (const auto *Entry = ConvertCostTableLookup(
2295 AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2296 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2297
2298 if (ST->hasDQI())
2299 if (const auto *Entry = ConvertCostTableLookup(
2300 AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2301 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2302
2303 if (ST->hasAVX512())
2304 if (const auto *Entry = ConvertCostTableLookup(
2305 AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
2306 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2307 }
2308
2309 if (ST->hasBWI())
2310 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2311 LTDest.second, LTSrc.second))
2312 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2313
2314 if (ST->hasDQI())
2315 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2316 LTDest.second, LTSrc.second))
2317 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2318
2319 if (ST->hasAVX512())
2320 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2321 LTDest.second, LTSrc.second))
2322 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2323
2324 if (ST->hasAVX2())
2325 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2326 LTDest.second, LTSrc.second))
2327 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2328
2329 if (ST->hasAVX())
2330 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2331 LTDest.second, LTSrc.second))
2332 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2333
2334 if (ST->hasSSE41())
2335 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2336 LTDest.second, LTSrc.second))
2337 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2338
2339 if (ST->hasSSE2())
2340 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2341 LTDest.second, LTSrc.second))
2342 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2343
2344 // Fallback: for i8/i16 sitofp/uitofp cases we need to extend to i32 first and
2345 // then convert with sitofp.
2346 if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
2347 1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
2348 Type *ExtSrc = Src->getWithNewBitWidth(32);
2349 unsigned ExtOpc =
2350 (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
2351
2352 // For scalar loads the extend would be free.
2353 InstructionCost ExtCost = 0;
2354 if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
2355 ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
2356
2357 return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
2358 TTI::CastContextHint::None, CostKind);
2359 }
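// Worked example (assumed types): costing uitofp v4i16 -> v4f32 here builds
// ExtSrc = v4i32 and ExtOpc = ZExt, returning the cost of the v4i16 -> v4i32
// zext plus the cost of a v4i32 -> v4f32 sitofp; for a scalar integer source
// fed directly by a load, the extension is treated as free (ExtCost = 0).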
2360
2361 // Fallback: for fptosi/fptoui i8/i16 cases we convert to i32 with fptosi and
2362 // then truncate.
2363 if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
2364 1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
2365 Type *TruncDst = Dst->getWithNewBitWidth(32);
2366 return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
2367 getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
2368 TTI::CastContextHint::None, CostKind);
2369 }
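// Worked example (assumed types): costing fptoui v4f32 -> v4i8 here builds
// TruncDst = v4i32 and returns the cost of an fptosi v4f32 -> v4i32 plus the
// cost of a v4i32 -> v4i8 trunc.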
2370
2371 return AdjustCost(
2372 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2373}
2374
2375InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2376 Type *CondTy,
2377 CmpInst::Predicate VecPred,
2378 TTI::TargetCostKind CostKind,
2379 const Instruction *I) {
2380 // TODO: Handle other cost kinds.
2381 if (CostKind != TTI::TCK_RecipThroughput)
2382 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2383 I);
2384
2385 // Legalize the type.
2386 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2387
2388 MVT MTy = LT.second;
2389
2390 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2391 assert(ISD && "Invalid opcode");
2392
2393 unsigned ExtraCost = 0;
2394 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2395 // Some vector comparison predicates cost extra instructions.
2396 if (MTy.isVector() &&
2397 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2398 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2399 ST->hasBWI())) {
2400 switch (cast<CmpInst>(I)->getPredicate()) {
2401 case CmpInst::Predicate::ICMP_NE:
2402 // xor(cmpeq(x,y),-1)
2403 ExtraCost = 1;
2404 break;
2405 case CmpInst::Predicate::ICMP_SGE:
2406 case CmpInst::Predicate::ICMP_SLE:
2407 // xor(cmpgt(x,y),-1)
2408 ExtraCost = 1;
2409 break;
2410 case CmpInst::Predicate::ICMP_ULT:
2411 case CmpInst::Predicate::ICMP_UGT:
2412 // cmpgt(xor(x,signbit),xor(y,signbit))
2413 // xor(cmpeq(pmaxu(x,y),x),-1)
2414 ExtraCost = 2;
2415 break;
2416 case CmpInst::Predicate::ICMP_ULE:
2417 case CmpInst::Predicate::ICMP_UGE:
2418 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2419 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2420 // cmpeq(psubus(x,y),0)
2421 // cmpeq(pminu(x,y),x)
2422 ExtraCost = 1;
2423 } else {
2424 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2425 ExtraCost = 3;
2426 }
2427 break;
2428 default:
2429 break;
2430 }
2431 }
2432 }
2433
2434 static const CostTblEntry SLMCostTbl[] = {
2435 // slm pcmpeq/pcmpgt throughput is 2
2436 { ISD::SETCC, MVT::v2i64, 2 },
2437 };
2438
2439 static const CostTblEntry AVX512BWCostTbl[] = {
2440 { ISD::SETCC, MVT::v32i16, 1 },
2441 { ISD::SETCC, MVT::v64i8, 1 },
2442
2443 { ISD::SELECT, MVT::v32i16, 1 },
2444 { ISD::SELECT, MVT::v64i8, 1 },
2445 };
2446
2447 static const CostTblEntry AVX512CostTbl[] = {
2448 { ISD::SETCC, MVT::v8i64, 1 },
2449 { ISD::SETCC, MVT::v16i32, 1 },
2450 { ISD::SETCC, MVT::v8f64, 1 },
2451 { ISD::SETCC, MVT::v16f32, 1 },
2452
2453 { ISD::SELECT, MVT::v8i64, 1 },
2454 { ISD::SELECT, MVT::v16i32, 1 },
2455 { ISD::SELECT, MVT::v8f64, 1 },
2456 { ISD::SELECT, MVT::v16f32, 1 },
2457
2458 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
2459 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4
2460
2461 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
2462 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
2463 };
2464
2465 static const CostTblEntry AVX2CostTbl[] = {
2466 { ISD::SETCC, MVT::v4i64, 1 },
2467 { ISD::SETCC, MVT::v8i32, 1 },
2468 { ISD::SETCC, MVT::v16i16, 1 },
2469 { ISD::SETCC, MVT::v32i8, 1 },
2470
2471 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
2472 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
2473 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
2474 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
2475 };
2476
2477 static const CostTblEntry AVX1CostTbl[] = {
2478 { ISD::SETCC, MVT::v4f64, 1 },
2479 { ISD::SETCC, MVT::v8f32, 1 },
2480 // AVX1 does not support 8-wide integer compare.
2481 { ISD::SETCC, MVT::v4i64, 4 },
2482 { ISD::SETCC, MVT::v8i32, 4 },
2483 { ISD::SETCC, MVT::v16i16, 4 },
2484 { ISD::SETCC, MVT::v32i8, 4 },
2485
2486 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
2487 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
2488 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
2489 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
2490 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
2491 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
2492 };
2493
2494 static const CostTblEntry SSE42CostTbl[] = {
2495 { ISD::SETCC, MVT::v2f64, 1 },
2496 { ISD::SETCC, MVT::v4f32, 1 },
2497 { ISD::SETCC, MVT::v2i64, 1 },
2498 };
2499
2500 static const CostTblEntry SSE41CostTbl[] = {
2501 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
2502 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
2503 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
2504 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
2505 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
2506 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
2507 };
2508
2509 static const CostTblEntry SSE2CostTbl[] = {
2510 { ISD::SETCC, MVT::v2f64, 2 },
2511 { ISD::SETCC, MVT::f64, 1 },
2512 { ISD::SETCC, MVT::v2i64, 8 },
2513 { ISD::SETCC, MVT::v4i32, 1 },
2514 { ISD::SETCC, MVT::v8i16, 1 },
2515 { ISD::SETCC, MVT::v16i8, 1 },
2516
2517 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
2518 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
2519 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
2520 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
2521 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
2522 };
2523
2524 static const CostTblEntry SSE1CostTbl[] = {
2525 { ISD::SETCC, MVT::v4f32, 2 },
2526 { ISD::SETCC, MVT::f32, 1 },
2527
2528 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
2529 };
2530
2531 if (ST->isSLM())
2532 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2533 return LT.first * (ExtraCost + Entry->Cost);
2534
2535 if (ST->hasBWI())
2536 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2537 return LT.first * (ExtraCost + Entry->Cost);
2538
2539 if (ST->hasAVX512())
2540 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2541 return LT.first * (ExtraCost + Entry->Cost);
2542
2543 if (ST->hasAVX2())
2544 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2545 return LT.first * (ExtraCost + Entry->Cost);
2546
2547 if (ST->hasAVX())
2548 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2549 return LT.first * (ExtraCost + Entry->Cost);
2550
2551 if (ST->hasSSE42())
2552 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2553 return LT.first * (ExtraCost + Entry->Cost);
2554
2555 if (ST->hasSSE41())
2556 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2557 return LT.first * (ExtraCost + Entry->Cost);
2558
2559 if (ST->hasSSE2())
2560 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2561 return LT.first * (ExtraCost + Entry->Cost);
2562
2563 if (ST->hasSSE1())
2564 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2565 return LT.first * (ExtraCost + Entry->Cost);
2566
2567 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2568}
2569
2570unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2571
2572InstructionCost
2573X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2574 TTI::TargetCostKind CostKind) {
2575
2576 // Costs should match the codegen from:
2577 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2578 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2579 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2580 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2581 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2582
2583 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2584 // specialized in these tables yet.
2585 static const CostTblEntry AVX512BITALGCostTbl[] = {
2586 { ISD::CTPOP, MVT::v32i16, 1 },
2587 { ISD::CTPOP, MVT::v64i8, 1 },
2588 { ISD::CTPOP, MVT::v16i16, 1 },
2589 { ISD::CTPOP, MVT::v32i8, 1 },
2590 { ISD::CTPOP, MVT::v8i16, 1 },
2591 { ISD::CTPOP, MVT::v16i8, 1 },
2592 };
2593 static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = {
2594 { ISD::CTPOP, MVT::v8i64, 1 },
2595 { ISD::CTPOP, MVT::v16i32, 1 },
2596 { ISD::CTPOP, MVT::v4i64, 1 },
2597 { ISD::CTPOP, MVT::v8i32, 1 },
2598 { ISD::CTPOP, MVT::v2i64, 1 },
2599 { ISD::CTPOP, MVT::v4i32, 1 },
2600 };
2601 static const CostTblEntry AVX512CDCostTbl[] = {
2602 { ISD::CTLZ, MVT::v8i64, 1 },
2603 { ISD::CTLZ, MVT::v16i32, 1 },
2604 { ISD::CTLZ, MVT::v32i16, 8 },
2605 { ISD::CTLZ, MVT::v64i8, 20 },
2606 { ISD::CTLZ, MVT::v4i64, 1 },
2607 { ISD::CTLZ, MVT::v8i32, 1 },
2608 { ISD::CTLZ, MVT::v16i16, 4 },
2609 { ISD::CTLZ, MVT::v32i8, 10 },
2610 { ISD::CTLZ, MVT::v2i64, 1 },
2611 { ISD::CTLZ, MVT::v4i32, 1 },
2612 { ISD::CTLZ, MVT::v8i16, 4 },
2613 { ISD::CTLZ, MVT::v16i8, 4 },
2614 };
2615 static const CostTblEntry AVX512BWCostTbl[] = {
2616 { ISD::ABS, MVT::v32i16, 1 },
2617 { ISD::ABS, MVT::v64i8, 1 },
2618 { ISD::BITREVERSE, MVT::v8i64, 5 },
2619 { ISD::BITREVERSE, MVT::v16i32, 5 },
2620 { ISD::BITREVERSE, MVT::v32i16, 5 },
2621 { ISD::BITREVERSE, MVT::v64i8, 5 },
2622 { ISD::BSWAP, MVT::v8i64, 1 },
2623 { ISD::BSWAP, MVT::v16i32, 1 },
2624 { ISD::BSWAP, MVT::v32i16, 1 },
2625 { ISD::CTLZ, MVT::v8i64, 23 },
2626 { ISD::CTLZ, MVT::v16i32, 22 },
2627 { ISD::CTLZ, MVT::v32i16, 18 },
2628 { ISD::CTLZ, MVT::v64i8, 17 },
2629 { ISD::CTPOP, MVT::v8i64, 7 },
2630 { ISD::CTPOP, MVT::v16i32, 11 },
2631 { ISD::CTPOP, MVT::v32i16, 9 },
2632 { ISD::CTPOP, MVT::v64i8, 6 },
2633 { ISD::CTTZ, MVT::v8i64, 10 },
2634 { ISD::CTTZ, MVT::v16i32, 14 },
2635 { ISD::CTTZ, MVT::v32i16, 12 },
2636 { ISD::CTTZ, MVT::v64i8, 9 },
2637 { ISD::SADDSAT, MVT::v32i16, 1 },
2638 { ISD::SADDSAT, MVT::v64i8, 1 },
2639 { ISD::SMAX, MVT::v32i16, 1 },
2640 { ISD::SMAX, MVT::v64i8, 1 },
2641 { ISD::SMIN, MVT::v32i16, 1 },
2642 { ISD::SMIN, MVT::v64i8, 1 },
2643 { ISD::SSUBSAT, MVT::v32i16, 1 },
2644 { ISD::SSUBSAT, MVT::v64i8, 1 },
2645 { ISD::UADDSAT, MVT::v32i16, 1 },
2646 { ISD::UADDSAT, MVT::v64i8, 1 },
2647 { ISD::UMAX, MVT::v32i16, 1 },
2648 { ISD::UMAX, MVT::v64i8, 1 },
2649 { ISD::UMIN, MVT::v32i16, 1 },
2650 { ISD::UMIN, MVT::v64i8, 1 },
2651 { ISD::USUBSAT, MVT::v32i16, 1 },
2652 { ISD::USUBSAT, MVT::v64i8, 1 },
2653 };
2654 static const CostTblEntry AVX512CostTbl[] = {
2655 { ISD::ABS, MVT::v8i64, 1 },
2656 { ISD::ABS, MVT::v16i32, 1 },
2657 { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split
2658 { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split
2659 { ISD::ABS, MVT::v4i64, 1 },
2660 { ISD::ABS, MVT::v2i64, 1 },
2661 { ISD::BITREVERSE, MVT::v8i64, 36 },
2662 { ISD::BITREVERSE, MVT::v16i32, 24 },
2663 { ISD::BITREVERSE, MVT::v32i16, 10 },
2664 { ISD::BITREVERSE, MVT::v64i8, 10 },
2665 { ISD::BSWAP, MVT::v8i64, 4 },
2666 { ISD::BSWAP, MVT::v16i32, 4 },
2667 { ISD::BSWAP, MVT::v32i16, 4 },
2668 { ISD::CTLZ, MVT::v8i64, 29 },
2669 { ISD::CTLZ, MVT::v16i32, 35 },
2670 { ISD::CTLZ, MVT::v32i16, 28 },
2671 { ISD::CTLZ, MVT::v64i8, 18 },
2672 { ISD::CTPOP, MVT::v8i64, 16 },
2673 { ISD::CTPOP, MVT::v16i32, 24 },
2674 { ISD::CTPOP, MVT::v32i16, 18 },
2675 { ISD::CTPOP, MVT::v64i8, 12 },
2676 { ISD::CTTZ, MVT::v8i64, 20 },
2677 { ISD::CTTZ, MVT::v16i32, 28 },
2678 { ISD::CTTZ, MVT::v32i16, 24 },
2679 { ISD::CTTZ, MVT::v64i8, 18 },
2680 { ISD::SMAX, MVT::v8i64, 1 },
2681 { ISD::SMAX, MVT::v16i32, 1 },
2682 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split
2683 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split
2684 { ISD::SMAX, MVT::v4i64, 1 },
2685 { ISD::SMAX, MVT::v2i64, 1 },
2686 { ISD::SMIN, MVT::v8i64, 1 },
2687 { ISD::SMIN, MVT::v16i32, 1 },
2688 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split
2689 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split
2690 { ISD::SMIN, MVT::v4i64, 1 },
2691 { ISD::SMIN, MVT::v2i64, 1 },
2692 { ISD::UMAX, MVT::v8i64, 1 },
2693 { ISD::UMAX, MVT::v16i32, 1 },
2694 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split
2695 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split
2696 { ISD::UMAX, MVT::v4i64, 1 },
2697 { ISD::UMAX, MVT::v2i64, 1 },
2698 { ISD::UMIN, MVT::v8i64, 1 },
2699 { ISD::UMIN, MVT::v16i32, 1 },
2700 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split
2701 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split
2702 { ISD::UMIN, MVT::v4i64, 1 },
2703 { ISD::UMIN, MVT::v2i64, 1 },
2704 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
2705 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
2706 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
2707 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
2708 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
2709 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
2710 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
2711 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
2712 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2713 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2714 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2715 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2716 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2717 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2718 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2719 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2720 { ISD::FMAXNUM, MVT::f32, 2 },
2721 { ISD::FMAXNUM, MVT::v4f32, 2 },
2722 { ISD::FMAXNUM, MVT::v8f32, 2 },
2723 { ISD::FMAXNUM, MVT::v16f32, 2 },
2724 { ISD::FMAXNUM, MVT::f64, 2 },
2725 { ISD::FMAXNUM, MVT::v2f64, 2 },
2726 { ISD::FMAXNUM, MVT::v4f64, 2 },
2727 { ISD::FMAXNUM, MVT::v8f64, 2 },
2728 { ISD::ISNAN, MVT::v8f64, 1 },
2729 { ISD::ISNAN, MVT::v16f32, 1 },
2730 };
2731 static const CostTblEntry XOPCostTbl[] = {
2732 { ISD::BITREVERSE, MVT::v4i64, 4 },
2733 { ISD::BITREVERSE, MVT::v8i32, 4 },
2734 { ISD::BITREVERSE, MVT::v16i16, 4 },
2735 { ISD::BITREVERSE, MVT::v32i8, 4 },
2736 { ISD::BITREVERSE, MVT::v2i64, 1 },
2737 { ISD::BITREVERSE, MVT::v4i32, 1 },
2738 { ISD::BITREVERSE, MVT::v8i16, 1 },
2739 { ISD::BITREVERSE, MVT::v16i8, 1 },
2740 { ISD::BITREVERSE, MVT::i64, 3 },
2741 { ISD::BITREVERSE, MVT::i32, 3 },
2742 { ISD::BITREVERSE, MVT::i16, 3 },
2743 { ISD::BITREVERSE, MVT::i8, 3 }
2744 };
2745 static const CostTblEntry AVX2CostTbl[] = {
2746 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2747 { ISD::ABS, MVT::v8i32, 1 },
2748 { ISD::ABS, MVT::v16i16, 1 },
2749 { ISD::ABS, MVT::v32i8, 1 },
2750 { ISD::BITREVERSE, MVT::v4i64, 5 },
2751 { ISD::BITREVERSE, MVT::v8i32, 5 },
2752 { ISD::BITREVERSE, MVT::v16i16, 5 },
2753 { ISD::BITREVERSE, MVT::v32i8, 5 },
2754 { ISD::BSWAP, MVT::v4i64, 1 },
2755 { ISD::BSWAP, MVT::v8i32, 1 },
2756 { ISD::BSWAP, MVT::v16i16, 1 },
2757 { ISD::CTLZ, MVT::v4i64, 23 },
2758 { ISD::CTLZ, MVT::v8i32, 18 },
2759 { ISD::CTLZ, MVT::v16i16, 14 },
2760 { ISD::CTLZ, MVT::v32i8, 9 },
2761 { ISD::CTPOP, MVT::v4i64, 7 },
2762 { ISD::CTPOP, MVT::v8i32, 11 },
2763 { ISD::CTPOP, MVT::v16i16, 9 },
2764 { ISD::CTPOP, MVT::v32i8, 6 },
2765 { ISD::CTTZ, MVT::v4i64, 10 },
2766 { ISD::CTTZ, MVT::v8i32, 14 },
2767 { ISD::CTTZ, MVT::v16i16, 12 },
2768 { ISD::CTTZ, MVT::v32i8, 9 },
2769 { ISD::SADDSAT, MVT::v16i16, 1 },
2770 { ISD::SADDSAT, MVT::v32i8, 1 },
2771 { ISD::SMAX, MVT::v8i32, 1 },
2772 { ISD::SMAX, MVT::v16i16, 1 },
2773 { ISD::SMAX, MVT::v32i8, 1 },
2774 { ISD::SMIN, MVT::v8i32, 1 },
2775 { ISD::SMIN, MVT::v16i16, 1 },
2776 { ISD::SMIN, MVT::v32i8, 1 },
2777 { ISD::SSUBSAT, MVT::v16i16, 1 },
2778 { ISD::SSUBSAT, MVT::v32i8, 1 },
2779 { ISD::UADDSAT, MVT::v16i16, 1 },
2780 { ISD::UADDSAT, MVT::v32i8, 1 },
2781 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
2782 { ISD::UMAX, MVT::v8i32, 1 },
2783 { ISD::UMAX, MVT::v16i16, 1 },
2784 { ISD::UMAX, MVT::v32i8, 1 },
2785 { ISD::UMIN, MVT::v8i32, 1 },
2786 { ISD::UMIN, MVT::v16i16, 1 },
2787 { ISD::UMIN, MVT::v32i8, 1 },
2788 { ISD::USUBSAT, MVT::v16i16, 1 },
2789 { ISD::USUBSAT, MVT::v32i8, 1 },
2790 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
2791 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2792 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2793 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
2794 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
2795 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
2796 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
2797 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
2798 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
2799 };
2800 static const CostTblEntry AVX1CostTbl[] = {
2801 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2802 { ISD::ABS, MVT::v8i32, 3 },
2803 { ISD::ABS, MVT::v16i16, 3 },
2804 { ISD::ABS, MVT::v32i8, 3 },
2805 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
2806 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
2807 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2808 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
2809 { ISD::BSWAP, MVT::v4i64, 4 },
2810 { ISD::BSWAP, MVT::v8i32, 4 },
2811 { ISD::BSWAP, MVT::v16i16, 4 },
2812 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
2813 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
2814 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2815 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2816 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
2817 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
2818 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2819 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
2820 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
2821 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
2822 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2823 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2824 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2825 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2826 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2827 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2828 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2829 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2830 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2831 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2832 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2833 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2834 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2835 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2836 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
2837 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2838 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2839 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2840 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2841 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2842 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2843 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2844 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2845 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
2846 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2847 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2848 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2849 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2850 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2851 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2852 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
2853 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
2854 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
2855 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
2856 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
2857 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
2858 { ISD::ISNAN, MVT::v4f64, 1 },
2859 { ISD::ISNAN, MVT::v8f32, 1 },
2860 };
2861 static const CostTblEntry GLMCostTbl[] = {
2862 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
2863 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2864 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
2865 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2866 };
2867 static const CostTblEntry SLMCostTbl[] = {
2868 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
2869 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2870 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
2871 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2872 };
2873 static const CostTblEntry SSE42CostTbl[] = {
2874 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
2875 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
2876 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
2877 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
2878 };
2879 static const CostTblEntry SSE41CostTbl[] = {
2880 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2881 { ISD::SMAX, MVT::v4i32, 1 },
2882 { ISD::SMAX, MVT::v16i8, 1 },
2883 { ISD::SMIN, MVT::v4i32, 1 },
2884 { ISD::SMIN, MVT::v16i8, 1 },
2885 { ISD::UMAX, MVT::v4i32, 1 },
2886 { ISD::UMAX, MVT::v8i16, 1 },
2887 { ISD::UMIN, MVT::v4i32, 1 },
2888 { ISD::UMIN, MVT::v8i16, 1 },
2889 };
2890 static const CostTblEntry SSSE3CostTbl[] = {
2891 { ISD::ABS, MVT::v4i32, 1 },
2892 { ISD::ABS, MVT::v8i16, 1 },
2893 { ISD::ABS, MVT::v16i8, 1 },
2894 { ISD::BITREVERSE, MVT::v2i64, 5 },
2895 { ISD::BITREVERSE, MVT::v4i32, 5 },
2896 { ISD::BITREVERSE, MVT::v8i16, 5 },
2897 { ISD::BITREVERSE, MVT::v16i8, 5 },
2898 { ISD::BSWAP, MVT::v2i64, 1 },
2899 { ISD::BSWAP, MVT::v4i32, 1 },
2900 { ISD::BSWAP, MVT::v8i16, 1 },
2901 { ISD::CTLZ, MVT::v2i64, 23 },
2902 { ISD::CTLZ, MVT::v4i32, 18 },
2903 { ISD::CTLZ, MVT::v8i16, 14 },
2904 { ISD::CTLZ, MVT::v16i8, 9 },
2905 { ISD::CTPOP, MVT::v2i64, 7 },
2906 { ISD::CTPOP, MVT::v4i32, 11 },
2907 { ISD::CTPOP, MVT::v8i16, 9 },
2908 { ISD::CTPOP, MVT::v16i8, 6 },
2909 { ISD::CTTZ, MVT::v2i64, 10 },
2910 { ISD::CTTZ, MVT::v4i32, 14 },
2911 { ISD::CTTZ, MVT::v8i16, 12 },
2912 { ISD::CTTZ, MVT::v16i8, 9 }
2913 };
2914 static const CostTblEntry SSE2CostTbl[] = {
2915 { ISD::ABS, MVT::v2i64, 4 },
2916 { ISD::ABS, MVT::v4i32, 3 },
2917 { ISD::ABS, MVT::v8i16, 2 },
2918 { ISD::ABS, MVT::v16i8, 2 },
2919 { ISD::BITREVERSE, MVT::v2i64, 29 },
2920 { ISD::BITREVERSE, MVT::v4i32, 27 },
2921 { ISD::BITREVERSE, MVT::v8i16, 27 },
2922 { ISD::BITREVERSE, MVT::v16i8, 20 },
2923 { ISD::BSWAP, MVT::v2i64, 7 },
2924 { ISD::BSWAP, MVT::v4i32, 7 },
2925 { ISD::BSWAP, MVT::v8i16, 7 },
2926 { ISD::CTLZ, MVT::v2i64, 25 },
2927 { ISD::CTLZ, MVT::v4i32, 26 },
2928 { ISD::CTLZ, MVT::v8i16, 20 },
2929 { ISD::CTLZ, MVT::v16i8, 17 },
2930 { ISD::CTPOP, MVT::v2i64, 12 },
2931 { ISD::CTPOP, MVT::v4i32, 15 },
2932 { ISD::CTPOP, MVT::v8i16, 13 },
2933 { ISD::CTPOP, MVT::v16i8, 10 },
2934 { ISD::CTTZ, MVT::v2i64, 14 },
2935 { ISD::CTTZ, MVT::v4i32, 18 },
2936 { ISD::CTTZ, MVT::v8i16, 16 },
2937 { ISD::CTTZ, MVT::v16i8, 13 },
2938 { ISD::SADDSAT, MVT::v8i16, 1 },
2939 { ISD::SADDSAT, MVT::v16i8, 1 },
2940 { ISD::SMAX, MVT::v8i16, 1 },
2941 { ISD::SMIN, MVT::v8i16, 1 },
2942 { ISD::SSUBSAT, MVT::v8i16, 1 },
2943 { ISD::SSUBSAT, MVT::v16i8, 1 },
2944 { ISD::UADDSAT, MVT::v8i16, 1 },
2945 { ISD::UADDSAT, MVT::v16i8, 1 },
2946 { ISD::UMAX, MVT::v8i16, 2 },
2947 { ISD::UMAX, MVT::v16i8, 1 },
2948 { ISD::UMIN, MVT::v8i16, 2 },
2949 { ISD::UMIN, MVT::v16i8, 1 },
2950 { ISD::USUBSAT, MVT::v8i16, 1 },
2951 { ISD::USUBSAT, MVT::v16i8, 1 },
2952 { ISD::FMAXNUM, MVT::f64, 4 },
2953 { ISD::FMAXNUM, MVT::v2f64, 4 },
2954 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
2955 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
2956 { ISD::ISNAN, MVT::f64, 1 },
2957 { ISD::ISNAN, MVT::v2f64, 1 },
2958 };
2959 static const CostTblEntry SSE1CostTbl[] = {
2960 { ISD::FMAXNUM, MVT::f32, 4 },
2961 { ISD::FMAXNUM, MVT::v4f32, 4 },
2962 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
2963 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
2964 { ISD::ISNAN, MVT::f32, 1 },
2965 { ISD::ISNAN, MVT::v4f32, 1 },
2966 };
2967 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
2968 { ISD::CTTZ, MVT::i64, 1 },
2969 };
2970 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
2971 { ISD::CTTZ, MVT::i32, 1 },
2972 { ISD::CTTZ, MVT::i16, 1 },
2973 { ISD::CTTZ, MVT::i8, 1 },
2974 };
2975 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
2976 { ISD::CTLZ, MVT::i64, 1 },
2977 };
2978 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
2979 { ISD::CTLZ, MVT::i32, 1 },
2980 { ISD::CTLZ, MVT::i16, 1 },
2981 { ISD::CTLZ, MVT::i8, 1 },
2982 };
2983 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
2984 { ISD::CTPOP, MVT::i64, 1 },
2985 };
2986 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
2987 { ISD::CTPOP, MVT::i32, 1 },
2988 { ISD::CTPOP, MVT::i16, 1 },
2989 { ISD::CTPOP, MVT::i8, 1 },
2990 };
2991 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2992 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV
2993 { ISD::BITREVERSE, MVT::i64, 14 },
2994 { ISD::BSWAP, MVT::i64, 1 },
2995 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
2996 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
2997 { ISD::CTPOP, MVT::i64, 10 },
2998 { ISD::SADDO, MVT::i64, 1 },
2999 { ISD::UADDO, MVT::i64, 1 },
3000 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto
3001 };
3002 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3003 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV
3004 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV
3005 { ISD::BITREVERSE, MVT::i32, 14 },
3006 { ISD::BITREVERSE, MVT::i16, 14 },
3007 { ISD::BITREVERSE, MVT::i8, 11 },
3008 { ISD::BSWAP, MVT::i32, 1 },
3009 { ISD::BSWAP, MVT::i16, 1 }, // ROL
3010 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
3011 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
3012 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV
3013 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
3014 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
3015 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH
3016 { ISD::CTPOP, MVT::i32, 8 },
3017 { ISD::CTPOP, MVT::i16, 9 },
3018 { ISD::CTPOP, MVT::i8, 7 },
3019 { ISD::SADDO, MVT::i32, 1 },
3020 { ISD::SADDO, MVT::i16, 1 },
3021 { ISD::SADDO, MVT::i8, 1 },
3022 { ISD::UADDO, MVT::i32, 1 },
3023 { ISD::UADDO, MVT::i16, 1 },
3024 { ISD::UADDO, MVT::i8, 1 },
3025 { ISD::UMULO, MVT::i32, 2 }, // mul + seto
3026 { ISD::UMULO, MVT::i16, 2 },
3027 { ISD::UMULO, MVT::i8, 2 },
3028 };
3029
3030 Type *RetTy = ICA.getReturnType();
3031 Type *OpTy = RetTy;
3032 Intrinsic::ID IID = ICA.getID();
3033 unsigned ISD = ISD::DELETED_NODE;
3034 switch (IID) {
3035 default:
3036 break;
3037 case Intrinsic::abs:
3038 ISD = ISD::ABS;
3039 break;
3040 case Intrinsic::bitreverse:
3041 ISD = ISD::BITREVERSE;
3042 break;
3043 case Intrinsic::bswap:
3044 ISD = ISD::BSWAP;
3045 break;
3046 case Intrinsic::ctlz:
3047 ISD = ISD::CTLZ;
3048 break;
3049 case Intrinsic::ctpop:
3050 ISD = ISD::CTPOP;
3051 break;
3052 case Intrinsic::cttz:
3053 ISD = ISD::CTTZ;
3054 break;
3055 case Intrinsic::isnan:
3056 ISD = ISD::ISNAN;
3057 OpTy = ICA.getArgTypes()[0];
3058 break;
3059 case Intrinsic::maxnum:
3060 case Intrinsic::minnum:
3061 // FMINNUM has same costs so don't duplicate.
3062 ISD = ISD::FMAXNUM;
3063 break;
3064 case Intrinsic::sadd_sat:
3065 ISD = ISD::SADDSAT;
3066 break;
3067 case Intrinsic::smax:
3068 ISD = ISD::SMAX;
3069 break;
3070 case Intrinsic::smin:
3071 ISD = ISD::SMIN;
3072 break;
3073 case Intrinsic::ssub_sat:
3074 ISD = ISD::SSUBSAT;
3075 break;
3076 case Intrinsic::uadd_sat:
3077 ISD = ISD::UADDSAT;
3078 break;
3079 case Intrinsic::umax:
3080 ISD = ISD::UMAX;
3081 break;
3082 case Intrinsic::umin:
3083 ISD = ISD::UMIN;
3084 break;
3085 case Intrinsic::usub_sat:
3086 ISD = ISD::USUBSAT;
3087 break;
3088 case Intrinsic::sqrt:
3089 ISD = ISD::FSQRT;
3090 break;
3091 case Intrinsic::sadd_with_overflow:
3092 case Intrinsic::ssub_with_overflow:
3093 // SSUBO has same costs so don't duplicate.
3094 ISD = ISD::SADDO;
3095 OpTy = RetTy->getContainedType(0);
3096 break;
3097 case Intrinsic::uadd_with_overflow:
3098 case Intrinsic::usub_with_overflow:
3099 // USUBO has same costs so don't duplicate.
3100 ISD = ISD::UADDO;
3101 OpTy = RetTy->getContainedType(0);
3102 break;
3103 case Intrinsic::umul_with_overflow:
3104 case Intrinsic::smul_with_overflow:
3105 // SMULO has same costs so don't duplicate.
3106 ISD = ISD::UMULO;
3107 OpTy = RetTy->getContainedType(0);
3108 break;
3109 }
3110
3111 if (ISD != ISD::DELETED_NODE) {
3112 // Legalize the type.
3113 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
3114 MVT MTy = LT.second;
3115
3116 // Attempt to lookup cost.
3117 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
3118 MTy.isVector()) {
3119 // With PSHUFB the code is very similar for all types. If we have integer
3120 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
3121 // we also need a PSHUFB.
3122 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
3123
3124 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
3125 // instructions. We also need an extract and an insert.
3126 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
3127 (ST->hasBWI() && MTy.is512BitVector())))
3128 Cost = Cost * 2 + 2;
3129
3130 return LT.first * Cost;
3131 }
3132
3133 auto adjustTableCost = [](const CostTblEntry &Entry,
3134 InstructionCost LegalizationCost,
3135 FastMathFlags FMF) {
3136 // If there are no NANs to deal with, then these are reduced to a
3137 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
3138 // assume is used in the non-fast case.
3139 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
3140 if (FMF.noNaNs())
3141 return LegalizationCost * 1;
3142 }
3143 return LegalizationCost * (int)Entry.Cost;
3144 };
3145
3146 if (ST->useGLMDivSqrtCosts())
3147 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
3148 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3149
3150 if (ST->isSLM())
3151 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3152 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3153
3154 if (ST->hasBITALG())
3155 if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
3156 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3157
3158 if (ST->hasVPOPCNTDQ())
3159 if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
3160 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3161
3162 if (ST->hasCDI())
3163 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
3164 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3165
3166 if (ST->hasBWI())
3167 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3168 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3169
3170 if (ST->hasAVX512())
3171 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3172 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3173
3174 if (ST->hasXOP())
3175 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3176 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3177
3178 if (ST->hasAVX2())
3179 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3180 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3181
3182 if (ST->hasAVX())
3183 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3184 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3185
3186 if (ST->hasSSE42())
3187 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3188 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3189
3190 if (ST->hasSSE41())
3191 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3192 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3193
3194 if (ST->hasSSSE3())
3195 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
3196 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3197
3198 if (ST->hasSSE2())
3199 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3200 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3201
3202 if (ST->hasSSE1())
3203 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3204 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3205
3206 if (ST->hasBMI()) {
3207 if (ST->is64Bit())
3208 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
3209 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3210
3211 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
3212 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3213 }
3214
3215 if (ST->hasLZCNT()) {
3216 if (ST->is64Bit())
3217 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
3218 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3219
3220 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
3221 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3222 }
3223
3224 if (ST->hasPOPCNT()) {
3225 if (ST->is64Bit())
3226 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
3227 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3228
3229 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
3230 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3231 }
3232
3233 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
3234 if (const Instruction *II = ICA.getInst()) {
3235 if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
3236 return TTI::TCC_Free;
3237 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
3238 if (LI->hasOneUse())
3239 return TTI::TCC_Free;
3240 }
3241 }
3242 }
3243
3244 // TODO - add BMI (TZCNT) scalar handling
3245
3246 if (ST->is64Bit())
3247 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3248 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3249
3250 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3251 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3252 }
3253
3254 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3255}
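
The lookups at lines 3146-3251 consult the tables in order from the most specific subtarget feature down to the 32/64-bit baseline, and the first table that contains an entry for (ISD, legal MVT) wins, scaled by the legalization factor LT.first. The sketch below is a simplified model of that "first matching table wins" cascade; the Entry struct and lookupCost helper are assumptions for illustration, not LLVM's CostTblEntry/CostTableLookup API.

  #include <cstdio>
  #include <vector>

  struct Entry { int ISD; int MTy; int Cost; };

  static const int ISD_CTPOP = 1;
  static const int MVT_v16i8 = 10;

  static int lookupCost(const std::vector<Entry> &Tbl, int ISD, int MTy) {
    for (const Entry &E : Tbl)
      if (E.ISD == ISD && E.MTy == MTy)
        return E.Cost;
    return -1; // no entry: fall through to the next (less specific) table
  }

  int main() {
    std::vector<Entry> BITALGTbl = {{ISD_CTPOP, MVT_v16i8, 1}};
    std::vector<Entry> SSSE3Tbl  = {{ISD_CTPOP, MVT_v16i8, 6}};
    bool HasBITALG = false; // hypothetical subtarget without AVX512BITALG

    int LTFirst = 1; // v16i8 is already legal, no splitting
    int Cost = -1;
    if (HasBITALG)
      Cost = lookupCost(BITALGTbl, ISD_CTPOP, MVT_v16i8);
    if (Cost < 0)
      Cost = lookupCost(SSSE3Tbl, ISD_CTPOP, MVT_v16i8);
    std::printf("ctpop <16 x i8> ~ %d\n", LTFirst * Cost); // prints 6 (SSSE3 table)
    return 0;
  }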
3256
3257InstructionCost
3258X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3259 TTI::TargetCostKind CostKind) {
3260 if (ICA.isTypeBasedOnly())
3261 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3262
3263 static const CostTblEntry AVX512CostTbl[] = {
3264 { ISD::ROTL, MVT::v8i64, 1 },
3265 { ISD::ROTL, MVT::v4i64, 1 },
3266 { ISD::ROTL, MVT::v2i64, 1 },
3267 { ISD::ROTL, MVT::v16i32, 1 },
3268 { ISD::ROTL, MVT::v8i32, 1 },
3269 { ISD::ROTL, MVT::v4i32, 1 },
3270 { ISD::ROTR, MVT::v8i64, 1 },
3271 { ISD::ROTR, MVT::v4i64, 1 },
3272 { ISD::ROTR, MVT::v2i64, 1 },
3273 { ISD::ROTR, MVT::v16i32, 1 },
3274 { ISD::ROTR, MVT::v8i32, 1 },
3275 { ISD::ROTR, MVT::v4i32, 1 }
3276 };
3277 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3278 static const CostTblEntry XOPCostTbl[] = {
3279 { ISD::ROTL, MVT::v4i64, 4 },
3280 { ISD::ROTL, MVT::v8i32, 4 },
3281 { ISD::ROTL, MVT::v16i16, 4 },
3282 { ISD::ROTL, MVT::v32i8, 4 },
3283 { ISD::ROTL, MVT::v2i64, 1 },
3284 { ISD::ROTL, MVT::v4i32, 1 },
3285 { ISD::ROTL, MVT::v8i16, 1 },
3286 { ISD::ROTL, MVT::v16i8, 1 },
3287 { ISD::ROTR, MVT::v4i64, 6 },
3288 { ISD::ROTR, MVT::v8i32, 6 },
3289 { ISD::ROTR, MVT::v16i16, 6 },
3290 { ISD::ROTR, MVT::v32i8, 6 },
3291 { ISD::ROTR, MVT::v2i64, 2 },
3292 { ISD::ROTR, MVT::v4i32, 2 },
3293 { ISD::ROTR, MVT::v8i16, 2 },
3294 { ISD::ROTR, MVT::v16i8, 2 }
3295 };
3296 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3297 { ISD::ROTL, MVT::i64, 1 },
3298 { ISD::ROTR, MVT::i64, 1 },
3299 { ISD::FSHL, MVT::i64, 4 }
3300 };
3301 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3302 { ISD::ROTL, MVT::i32, 1 },
3303 { ISD::ROTL, MVT::i16, 1 },
3304 { ISD::ROTL, MVT::i8, 1 },
3305 { ISD::ROTR, MVT::i32, 1 },
3306 { ISD::ROTR, MVT::i16, 1 },
3307 { ISD::ROTR, MVT::i8, 1 },
3308 { ISD::FSHL, MVT::i32, 4 },
3309 { ISD::FSHL, MVT::i16, 4 },
3310 { ISD::FSHL, MVT::i8, 4 }
3311 };
3312
3313 Intrinsic::ID IID = ICA.getID();
3314 Type *RetTy = ICA.getReturnType();
3315 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3316 unsigned ISD = ISD::DELETED_NODE;
3317 switch (IID) {
3318 default:
3319 break;
3320 case Intrinsic::fshl:
3321 ISD = ISD::FSHL;
3322 if (Args[0] == Args[1])
3323 ISD = ISD::ROTL;
3324 break;
3325 case Intrinsic::fshr:
3326 // FSHR has same costs so don't duplicate.
3327 ISD = ISD::FSHL;
3328 if (Args[0] == Args[1])
3329 ISD = ISD::ROTR;
3330 break;
3331 }
3332
3333 if (ISD != ISD::DELETED_NODE) {
3334 // Legalize the type.
3335 std::pair<InstructionCost, MVT> LT =
3336 TLI->getTypeLegalizationCost(DL, RetTy);
3337 MVT MTy = LT.second;
3338
3339 // Attempt to lookup cost.
3340 if (ST->hasAVX512())
3341 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3342 return LT.first * Entry->Cost;
3343
3344 if (ST->hasXOP())
3345 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3346 return LT.first * Entry->Cost;
3347
3348 if (ST->is64Bit())
3349 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3350 return LT.first * Entry->Cost;
3351
3352 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3353 return LT.first * Entry->Cost;
3354 }
3355
3356 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3357}
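
The switch at lines 3317-3331 reclassifies a funnel shift as a rotate when both value operands are the same SSA value, because fshl(x, x, s) is exactly a rotate-left (and likewise fshr a rotate-right). A tiny sketch of that value-level equivalence (plain 32-bit integers, not the cost model):

  #include <cstdint>
  #include <cstdio>

  // fshl(a, b, s) on i32 concatenates a:b and keeps the high 32 bits after a
  // left shift by s; when a == b this is a rotate left, which is why the cost
  // code above maps fshl with identical operands to ISD::ROTL.
  static uint32_t fshl32(uint32_t A, uint32_t B, uint32_t S) {
    S &= 31;
    return S ? (A << S) | (B >> (32 - S)) : A;
  }

  static uint32_t rotl32(uint32_t X, uint32_t S) { return fshl32(X, X, S); }

  int main() {
    uint32_t X = 0x80000001u;
    std::printf("fshl(x,x,4)=0x%08x rotl(x,4)=0x%08x\n",
                (unsigned)fshl32(X, X, 4), (unsigned)rotl32(X, 4));
    return 0;
  }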
3358
3359InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3360 unsigned Index) {
3361 static const CostTblEntry SLMCostTbl[] = {
3362 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
3363 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
3364 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
3365 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
3366 };
3367
3368 assert(Val->isVectorTy() && "This must be a vector type");
  24: '?' condition is true
3369 Type *ScalarType = Val->getScalarType();
3370 int RegisterFileMoveCost = 0;
3371
3372 // Non-immediate extraction/insertion can be handled as a sequence of
3373 // aliased loads+stores via the stack.
3374 if (Index == -1U && (Opcode == Instruction::ExtractElement ||
3375 Opcode == Instruction::InsertElement)) {
3376 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
3377 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
3378
3379 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
3380 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
3381 Align VecAlign = DL.getPrefTypeAlign(Val);
3382 Align SclAlign = DL.getPrefTypeAlign(ScalarType);
3383
3384 // Extract - store vector to stack, load scalar.
3385 if (Opcode == Instruction::ExtractElement) {
3386 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3387 TTI::TargetCostKind::TCK_RecipThroughput) +
3388 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
3389 TTI::TargetCostKind::TCK_RecipThroughput);
3390 }
3391 // Insert - store vector to stack, store scalar, load vector.
3392 if (Opcode == Instruction::InsertElement) {
3393 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3394 TTI::TargetCostKind::TCK_RecipThroughput) +
3395 getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
3396 TTI::TargetCostKind::TCK_RecipThroughput) +
3397 getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
3398 TTI::TargetCostKind::TCK_RecipThroughput);
3399 }
3400 }
3401
3402 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
  24.1: 'Opcode' is equal to ExtractElement
3403 Opcode == Instruction::InsertElement)) {
3404 // Legalize the type.
3405 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3406
3407 // This type is legalized to a scalar type.
3408 if (!LT.second.isVector())
  25: Calling 'MVT::isVector'
  29: Returning from 'MVT::isVector'
  30: Taking false branch
3409 return 0;
3410
3411 // The type may be split. Normalize the index to the new type.
3412 unsigned NumElts = LT.second.getVectorNumElements();
3413 unsigned SubNumElts = NumElts;
3414 Index = Index % NumElts;
3415
3416 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3417 // For inserts, we also need to insert the subvector back.
3418 if (LT.second.getSizeInBits() > 128) {
  31: Assuming the condition is true
3419 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
  32: Taking true branch
  33: Assuming the condition is true
  34: '?' condition is true
3420 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3421 SubNumElts = NumElts / NumSubVecs;
  35: Value assigned to 'SubNumElts'
3422 if (SubNumElts <= Index) {
  36: Assuming 'SubNumElts' is <= 'Index'
  37: Taking true branch
3423 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
  37.1: 'Opcode' is not equal to InsertElement
  38: '?' condition is false
3424 Index %= SubNumElts;
  39: Division by zero
3425 }
3426 }
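
The division flagged at step 39 requires SubNumElts == 0, which in turn requires the legalized type to report more 128-bit subvectors than elements (NumElts < SizeInBits / 128). For the legal X86 vector MVTs, whose element width is at most 64 bits, that situation does not appear to arise, so this looks like a false positive the analyzer cannot exclude from local information alone; an assert(SubNumElts != 0) before the modulo would document the invariant and would likely prune this path for the analyzer. The standalone sketch below reproduces the arithmetic on the path above with hypothetical numbers only:

  #include <cstdio>

  // Hypothetical values satisfying the analyzer's assumptions at steps 31-38;
  // no real legal X86 MVT is claimed to produce them.
  int main() {
    unsigned Index = 5;                         // step 36: SubNumElts <= Index
    unsigned SizeInBits = 512;                  // step 31: size > 128
    unsigned NumElts = 2;                       // assumed element count of the legal type
    unsigned NumSubVecs = SizeInBits / 128;     // line 3420: 4
    unsigned SubNumElts = NumElts / NumSubVecs; // line 3421: 2 / 4 == 0 (step 35)
    if (SubNumElts <= Index) {                  // step 37: branch taken
      std::printf("SubNumElts = %u -> 'Index %%= SubNumElts' divides by zero\n",
                  SubNumElts);
      // Index %= SubNumElts;                   // line 3424: undefined behavior
    }
    return 0;
  }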
3427
3428 if (Index == 0) {
3429 // Floating point scalars are already located in index #0.
3430 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3431 // true for all.
3432 if (ScalarType->isFloatingPointTy())
3433 return RegisterFileMoveCost;
3434
3435 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3436 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3437 return 1 + RegisterFileMoveCost;
3438 }
3439
3440 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3441 assert(ISD && "Unexpected vector opcode");
3442 MVT MScalarTy = LT.second.getScalarType();
3443 if (ST->isSLM())
3444 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3445 return Entry->Cost + RegisterFileMoveCost;
3446
3447 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3448 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3449 (MScalarTy.isInteger() && ST->hasSSE41()))
3450 return 1 + RegisterFileMoveCost;
3451
3452 // Assume insertps is relatively cheap on all targets.
3453 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3454 Opcode == Instruction::InsertElement)
3455 return 1 + RegisterFileMoveCost;
3456
3457 // For extractions we just need to shuffle the element to index 0, which
3458 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3459 // the elements to its destination. In both cases we must handle the
3460 // subvector move(s).
3461 // If the vector type is already less than 128-bits then don't reduce it.
3462 // TODO: Under what circumstances should we shuffle using the full width?
3463 InstructionCost ShuffleCost = 1;
3464 if (Opcode == Instruction::InsertElement) {
3465 auto *SubTy = cast<VectorType>(Val);
3466 EVT VT = TLI->getValueType(DL, Val);
3467 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3468 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3469 ShuffleCost =
3470 getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3471 }
3472 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3473 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3474 }
3475
3476 // Add to the base cost if we know that the extracted element of a vector is
3477 // destined to be moved to and used in the integer register file.
3478 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3479 RegisterFileMoveCost += 1;
3480
3481 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3482}
3483
3484InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3485 const APInt &DemandedElts,
3486 bool Insert,
3487 bool Extract) {
3488 InstructionCost Cost = 0;
3489
3490 // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
3491 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3492 if (Insert) {
3493 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3494 MVT MScalarTy = LT.second.getScalarType();
3495
3496 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3497 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3498 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3499 // For types we can insert directly, insertion into 128-bit sub vectors is
3500 // cheap, followed by a cheap chain of concatenations.
3501 if (LT.second.getSizeInBits() <= 128) {
3502 Cost +=
3503 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3504 } else {
3505 // In each 128-lane, if at least one index is demanded but not all
3506 // indices are demanded and this 128-lane is not the first 128-lane of
3507 // the legalized-vector, then this 128-lane needs an extracti128; if in
3508 // each 128-lane, there is at least one demanded index, this 128-lane
3509 // needs an inserti128.
3510
3511 // The following cases will help you build a better understanding:
3512 // Assume we insert several elements into a v8i32 vector in avx2,
3513 // Case#1: inserting into 1th index needs vpinsrd + inserti128.
3514 // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3515 // inserti128.
3516 // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
3517 const int CostValue = *LT.first.getValue();
3518 assert(CostValue >= 0 && "Negative cost!");
3519 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
3520 unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3521 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3522 unsigned Scale = NumElts / Num128Lanes;
3523 // We iterate each 128-lane, and check if we need a
3524 // extracti128/inserti128 for this 128-lane.
3525 for (unsigned I = 0; I < NumElts; I += Scale) {
3526 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3527 APInt MaskedDE = Mask & WidenedDemandedElts;
3528 unsigned Population = MaskedDE.countPopulation();
3529 Cost += (Population > 0 && Population != Scale &&
3530 I % LT.second.getVectorNumElements() != 0);
3531 Cost += Population > 0;
3532 }
3533 Cost += DemandedElts.countPopulation();
3534
3535 // For vXf32 cases, insertion into the 0'th index in each v4f32
3536 // 128-bit vector is free.
3537 // NOTE: This assumes legalization widens vXf32 vectors.
3538 if (MScalarTy == MVT::f32)
3539 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3540 i < e; i += 4)
3541 if (DemandedElts[i])
3542 Cost--;
3543 }
3544 } else if (LT.second.isVector()) {
3545 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3546 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3547 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3548 // considered cheap.
3549 if (Ty->isIntOrIntVectorTy())
3550 Cost += DemandedElts.countPopulation();
3551
3552 // Get the smaller of the legalized or original pow2-extended number of
3553 // vector elements, which represents the number of unpacks we'll end up
3554 // performing.
3555 unsigned NumElts = LT.second.getVectorNumElements();
3556 unsigned Pow2Elts =
3557 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3558 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3559 }
3560 }
3561
3562 // TODO: Use default extraction for now, but we should investigate extending this
3563 // to handle repeated subvector extraction.
3564 if (Extract)
3565 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3566
3567 return Cost;
3568}
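
Worked example of the 128-bit-lane scan at lines 3525-3533 (the target and demanded mask are hypothetical): inserting only element 5 of a <8 x i32> on an AVX2 target demands one element of the upper lane, so the loop charges one extracti128 (a non-first, partially demanded lane) plus one inserti128 (the lane has a demanded element), and the per-element term adds one vpinsrd, matching Case#2 in the comment above. A self-contained sketch of that lane scan:

  #include <cstdio>
  #include <vector>

  // Simplified re-implementation of the lane scan for a single legal register
  // (CostValue == 1); DemandedElts is modeled as a vector of bools.
  int main() {
    const unsigned NumElts = 8;       // legalized <8 x i32> on an AVX2 target
    const unsigned Scale = 4;         // elements per 128-bit lane
    std::vector<bool> Demanded(NumElts, false);
    Demanded[5] = true;               // Case#2: insert into index 5

    int Cost = 0;
    for (unsigned I = 0; I < NumElts; I += Scale) {
      unsigned Population = 0;
      for (unsigned J = I; J < I + Scale; ++J)
        Population += Demanded[J];
      // A non-first lane that is partially (not fully) demanded needs extracti128.
      Cost += (Population > 0 && Population != Scale && I != 0);
      // Any lane with a demanded element needs inserti128 (or is the base reg).
      Cost += (Population > 0);
    }
    for (bool D : Demanded)           // one insert (vpinsrd) per demanded element
      Cost += D;
    std::printf("scalarization insert cost ~ %d\n", Cost); // prints 3
    return 0;
  }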
3569
3570InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3571 MaybeAlign Alignment,
3572 unsigned AddressSpace,
3573 TTI::TargetCostKind CostKind,
3574 const Instruction *I) {
3575 // TODO: Handle other cost kinds.
3576 if (CostKind != TTI::TCK_RecipThroughput) {
3577 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3578 // Store instruction with index and scale costs 2 Uops.
3579 // Check the preceding GEP to identify non-const indices.
3580 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3581 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3582 return TTI::TCC_Basic * 2;
3583 }
3584 }
3585 return TTI::TCC_Basic;
3586 }
3587
3588 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3589        "Invalid Opcode");
3590 // Type legalization can't handle structs
3591 if (TLI->getValueType(DL, Src, true) == MVT::Other)
3592 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3593 CostKind);
3594
3595 // Legalize the type.
3596 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3597
3598 auto *VTy = dyn_cast<FixedVectorType>(Src);
3599
3600 // Handle the simple case of non-vectors.
3601 // NOTE: this assumes that legalization never creates vector from scalars!
3602 if (!VTy || !LT.second.isVector())
3603 // Each load/store unit costs 1.
3604 return LT.first * 1;
3605
3606 bool IsLoad = Opcode == Instruction::Load;
3607
3608 Type *EltTy = VTy->getElementType();
3609
3610 const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3611
3612 InstructionCost Cost = 0;
3613
3614 // Source of truth: how many elements were there in the original IR vector?
3615 const unsigned SrcNumElt = VTy->getNumElements();
3616
3617 // How far have we gotten?
3618 int NumEltRemaining = SrcNumElt;
3619 // Note that we intentionally capture by-reference, NumEltRemaining changes.
3620 auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3621
3622 const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3623
3624 // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3625 const unsigned XMMBits = 128;
3626 if (XMMBits % EltTyBits != 0)
3627 // Vector size must be a multiple of the element size. I.e. no padding.
3628 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3629 CostKind);
3630 const int NumEltPerXMM = XMMBits / EltTyBits;
3631
3632 auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3633
3634 for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3635 NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3636 // How many elements would a single op deal with at once?
3637 if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3638 // Vector size must be a multiple of the element size. I.e. no padding.
3639 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3640 CostKind);
3641 int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3642
3643 assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3644 assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3645        (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3646        "Unless we haven't halved the op size yet, "
3647        "we have less than two op's sized units of work left.");
3648
3649 auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3650 ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3651 : XMMVecTy;
3652
3653 assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3654        "After halving sizes, the vector elt count is no longer a multiple "
3655        "of number of elements per operation?");
3656 auto *CoalescedVecTy =
3657 CurrNumEltPerOp == 1
3658 ? CurrVecTy
3659 : FixedVectorType::get(
3660 IntegerType::get(Src->getContext(),
3661 EltTyBits * CurrNumEltPerOp),
3662 CurrVecTy->getNumElements() / CurrNumEltPerOp);
3663 assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3664 DL.getTypeSizeInBits(CurrVecTy) &&
3665 "coalescing elements doesn't change vector width.");
3666
3667 while (NumEltRemaining > 0) {
3668 assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
3669
3670 // Can we use this vector size, as per the remaining element count?
3671 // Iff the vector is naturally aligned, we can do a wide load regardless.
3672 if (NumEltRemaining < CurrNumEltPerOp &&
3673 (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3674 CurrOpSizeBytes != 1)
3675 break; // Try smaller vector size.
3676
3677 bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3678
3679 // If we have fully processed the previous reg, we need to replenish it.
3680 if (SubVecEltsLeft == 0) {
3681 SubVecEltsLeft += CurrVecTy->getNumElements();
3682 // And that's free only for the 0'th subvector of a legalized vector.
3683 if (!Is0thSubVec)
3684 Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3685 : TTI::ShuffleKind::SK_ExtractSubvector,
3686 VTy, None, NumEltDone(), CurrVecTy);
3687 }
3688
3689 // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3690 // for smaller widths (32/16/8) we have to insert/extract them separately.
3691 // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3692 // but let's pretend that it is also true for 16/8 bit wide ops...)
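 // Illustrative sketch of the case handled below (example, not a measurement):
 // a 4-byte (32-bit) chunk that is not the 0'th subreg cannot be moved with a
 // plain vector load/store, so its cost is modelled as the scalarization
 // overhead of inserting (for loads) or extracting (for stores) one element
 // of the coalesced vector type.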
3693 if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3694 int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3695 assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
3696 int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3697 APInt DemandedElts =
3698 APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3699 CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
3700 assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
3701 Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
3702 !IsLoad);
3703 }
3704
3705 // This isn't exactly right. We're using slow unaligned 32-byte accesses
3706 // as a proxy for a double-pumped AVX memory interface such as on
3707 // Sandybridge.
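 // Resulting charge (sketch): 2 units for a 32-byte op when unaligned 32-byte
 // accesses are slow (e.g. a double-pumped 256-bit memory interface), and 1
 // unit for every other op size.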
3708 if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
3709 Cost += 2;
3710 else
3711 Cost += 1;
3712
3713 SubVecEltsLeft -= CurrNumEltPerOp;
3714 NumEltRemaining -= CurrNumEltPerOp;
3715 Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
3716 }
3717 }
3718
3719 assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
3720
3721 return Cost;
3722}
3723
3724InstructionCost
3725X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
3726 unsigned AddressSpace,
3727 TTI::TargetCostKind CostKind) {
3728 bool IsLoad = (Instruction::Load == Opcode);
3729 bool IsStore = (Instruction::Store == Opcode);
3730
3731 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3732 if (!SrcVTy)
3733 // For a scalar, take the regular cost without the mask.
3734 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3735
3736 unsigned NumElem = SrcVTy->getNumElements();
3737 auto *MaskTy =
3738 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3739 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3740 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
3741 // Scalarization
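 // Sketch of the scalarized cost assembled below (all terms use the names
 // defined in this block):
 //   MaskSplitCost  - extracting each i8 mask lane,
 //   MaskCmpCost    - NumElem * (scalar icmp + branch) to test each lane,
 //   ValueSplitCost - inserting/extracting each data lane,
 //   MemopCost      - NumElem scalar loads or stores.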
3742 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3743 InstructionCost MaskSplitCost =
3744 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3745 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
3746 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3747 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3748 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3749 InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3750 InstructionCost ValueSplitCost =
3751 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3752 InstructionCost MemopCost =
3753 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3754 Alignment, AddressSpace, CostKind);
3755 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3756 }
3757
3758 // Legalize the type.
3759 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3760 auto VT = TLI->getValueType(DL, SrcVTy);
3761 InstructionCost Cost = 0;
3762 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3763 LT.second.getVectorNumElements() == NumElem)
3764 // Promotion requires extend/truncate for data and a shuffle for mask.
3765 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
3766 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
3767
3768 else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
3769 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3770 LT.second.getVectorNumElements());
3771 // Expanding requires filling the mask with zeroes.
3772 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
3773 }
3774
3775 // Pre-AVX512 - each maskmov load costs ~2 and each maskmov store costs ~8.
3776 if (!ST->hasAVX512())
3777 return Cost + LT.first * (IsLoad ? 2 : 8);
3778
3779 // AVX-512 masked load/store is cheaper.
3780 return Cost + LT.first;
3781}
3782
3783InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
3784 ScalarEvolution *SE,
3785 const SCEV *Ptr) {
3786 // Address computations in vectorized code with non-consecutive addresses will
3787 // likely result in more instructions compared to scalar code where the
3788 // computation can more often be merged into the index mode. The resulting
3789 // extra micro-ops can significantly decrease throughput.
3790 const unsigned NumVectorInstToHideOverhead = 10;
3791
3792 // Cost modeling of strided access computation is hidden by the indexing
3793 // modes of X86 regardless of the stride value. We don't believe that there
3794 // is a difference between constant strided access in general and a constant
3795 // stride whose value is less than or equal to 64.
3796 // Even in the case of (loop invariant) stride whose value is not known at
3797 // compile time, the address computation will not incur more than one extra
3798 // ADD instruction.
3799 if (Ty->isVectorTy() && SE) {
3800 if (!BaseT::isStridedAccess(Ptr))
3801 return NumVectorInstToHideOverhead;
3802 if (!BaseT::getConstantStrideStep(SE, Ptr))
3803 return 1;
3804 }
3805
3806 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3807}
3808
3809InstructionCost
3810X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3811 Optional<FastMathFlags> FMF,
3812 TTI::TargetCostKind CostKind) {
3813 if (TTI::requiresOrderedReduction(FMF))
3814 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
3815
3816 // We use the Intel Architecture Code Analyzer (IACA) to measure the
3817 // throughput and use that as the cost.
3818
3819 static const CostTblEntry SLMCostTblNoPairWise[] = {
3820 { ISD::FADD, MVT::v2f64, 3 },
3821 { ISD::ADD, MVT::v2i64, 5 },
3822 };
3823
3824 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3825 { ISD::FADD, MVT::v2f64, 2 },
3826 { ISD::FADD, MVT::v2f32, 2 },
3827 { ISD::FADD, MVT::v4f32, 4 },
3828 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
3829 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
3830 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
3831 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
3832 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
3833 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
3834 { ISD::ADD, MVT::v2i8, 2 },
3835 { ISD::ADD, MVT::v4i8, 2 },
3836 { ISD::ADD, MVT::v8i8, 2 },
3837 { ISD::ADD, MVT::v16i8, 3 },
3838 };
3839
3840 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3841 { ISD::FADD, MVT::v4f64, 3 },
3842 { ISD::FADD, MVT::v4f32, 3 },
3843 { ISD::FADD, MVT::v8f32, 4 },
3844 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
3845 { ISD::ADD, MVT::v4i64, 3 },
3846 { ISD::ADD, MVT::v8i32, 5 },
3847 { ISD::ADD, MVT::v16i16, 5 },
3848 { ISD::ADD, MVT::v32i8, 4 },
3849 };
3850
3851 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3852 assert(ISD && "Invalid opcode");
3853
3854 // Before legalizing the type, give a chance to look up illegal narrow types
3855 // in the table.
3856 // FIXME: Is there a better way to do this?
3857 EVT VT = TLI->getValueType(DL, ValTy);
3858 if (VT.isSimple()) {
3859 MVT MTy = VT.getSimpleVT();
3860 if (ST->isSLM())
3861 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3862 return Entry->Cost;
3863
3864 if (ST->hasAVX())
3865 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3866 return Entry->Cost;
3867
3868 if (ST->hasSSE2())
3869 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3870 return Entry->Cost;
3871 }
3872
3873 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3874
3875 MVT MTy = LT.second;
3876
3877 auto *ValVTy = cast<FixedVectorType>(ValTy);
3878
3879 // Special case: vXi8 mul reductions are performed as vXi16.
3880 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
3881 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
3882 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
3883 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
3884 TargetTransformInfo::CastContextHint::None,
3885 CostKind) +
3886 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
3887 }
3888
3889 InstructionCost ArithmeticCost = 0;
3890 if (LT.first != 1 && MTy.isVector() &&
3891 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3892 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3893 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3894 MTy.getVectorNumElements());
3895 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3896 ArithmeticCost *= LT.first - 1;
3897 }
3898
3899 if (ST->isSLM())
3900 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3901 return ArithmeticCost + Entry->Cost;
3902
3903 if (ST->hasAVX())
3904 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3905 return ArithmeticCost + Entry->Cost;
3906
3907 if (ST->hasSSE2())
3908 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3909 return ArithmeticCost + Entry->Cost;
3910
3911 // FIXME: These assume a naive kshift+binop lowering, which is probably
3912 // conservative in most cases.
3913 static const CostTblEntry AVX512BoolReduction[] = {
3914 { ISD::AND, MVT::v2i1, 3 },
3915 { ISD::AND, MVT::v4i1, 5 },
3916 { ISD::AND, MVT::v8i1, 7 },
3917 { ISD::AND, MVT::v16i1, 9 },
3918 { ISD::AND, MVT::v32i1, 11 },
3919 { ISD::AND, MVT::v64i1, 13 },
3920 { ISD::OR, MVT::v2i1, 3 },
3921 { ISD::OR, MVT::v4i1, 5 },
3922 { ISD::OR, MVT::v8i1, 7 },
3923 { ISD::OR, MVT::v16i1, 9 },
3924 { ISD::OR, MVT::v32i1, 11 },
3925 { ISD::OR, MVT::v64i1, 13 },
3926 };
3927
3928 static const CostTblEntry AVX2BoolReduction[] = {
3929 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
3930 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
3931 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
3932 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
3933 };
3934
3935 static const CostTblEntry AVX1BoolReduction[] = {
3936 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
3937 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
3938 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3939 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3940 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
3941 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
3942 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3943 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3944 };
3945
3946 static const CostTblEntry SSE2BoolReduction[] = {
3947 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
3948 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
3949 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
3950 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
3951 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
3952 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
3953 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
3954 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
3955 };
3956
3957 // Handle bool allof/anyof patterns.
3958 if (ValVTy->getElementType()->isIntegerTy(1)) {
3959 InstructionCost ArithmeticCost = 0;
3960 if (LT.first != 1 && MTy.isVector() &&
3961 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3962 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3963 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3964 MTy.getVectorNumElements());
3965 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3966 ArithmeticCost *= LT.first - 1;
3967 }
3968
3969 if (ST->hasAVX512())
3970 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3971 return ArithmeticCost + Entry->Cost;
3972 if (ST->hasAVX2())
3973 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3974 return ArithmeticCost + Entry->Cost;
3975 if (ST->hasAVX())
3976 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3977 return ArithmeticCost + Entry->Cost;
3978 if (ST->hasSSE2())
3979 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3980 return ArithmeticCost + Entry->Cost;
3981
3982 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
3983 }
3984
3985 unsigned NumVecElts = ValVTy->getNumElements();
3986 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3987
3988 // Special case power of 2 reductions where the scalar type isn't changed
3989 // by type legalization.
3990 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3991 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
3992
3993 InstructionCost ReductionCost = 0;
3994
3995 auto *Ty = ValVTy;
3996 if (LT.first != 1 && MTy.isVector() &&
3997 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3998 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3999 Ty = FixedVectorType::get(ValVTy->getElementType(),
4000 MTy.getVectorNumElements());
4001 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
4002 ReductionCost *= LT.first - 1;
4003 NumVecElts = MTy.getVectorNumElements();
4004 }
4005
4006 // Now handle reduction with the legal type, taking into account size changes
4007 // at each level.
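 // Worked example (illustrative): for v8i32 the loop below charges one
 // extract_subvector (256 -> 128 bits) plus an add, a permute plus an add at
 // the 128-bit level, a shuffle plus an add at the 64-bit level, and finally
 // one extractelement for the scalar result.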
4008 while (NumVecElts > 1) {
4009 // Determine the size of the remaining vector we need to reduce.
4010 unsigned Size = NumVecElts * ScalarSize;
4011 NumVecElts /= 2;
4012 // If we're reducing from 256/512 bits, use an extract_subvector.
4013 if (Size > 128) {
4014 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4015 ReductionCost +=
4016 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4017 Ty = SubTy;
4018 } else if (Size == 128) {
4019 // Reducing from 128 bits is a permute of v2f64/v2i64.
4020 FixedVectorType *ShufTy;
4021 if (ValVTy->isFloatingPointTy())
4022 ShufTy =
4023 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
4024 else
4025 ShufTy =
4026 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
4027 ReductionCost +=
4028 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4029 } else if (Size == 64) {
4030 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4031 FixedVectorType *ShufTy;
4032 if (ValVTy->isFloatingPointTy())
4033 ShufTy =
4034 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
4035 else
4036 ShufTy =
4037 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
4038 ReductionCost +=
4039 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4040 } else {
4041 // Reducing from smaller size is a shift by immediate.
4042 auto *ShiftTy = FixedVectorType::get(
4043 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
4044 ReductionCost += getArithmeticInstrCost(
4045 Instruction::LShr, ShiftTy, CostKind,
4046 TargetTransformInfo::OK_AnyValue,
4047 TargetTransformInfo::OK_UniformConstantValue,
4048 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4049 }
4050
4051 // Add the arithmetic op for this level.
4052 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
4053 }
4054
4055 // Add the final extract element to the cost.
4056 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4057}
4058
4059InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
4060 bool IsUnsigned) {
4061 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
4062
4063 MVT MTy = LT.second;
4064
4065 int ISD;
4066 if (Ty->isIntOrIntVectorTy()) {
4067 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4068 } else {
4069 assert(Ty->isFPOrFPVectorTy() &&
4070 "Expected floating point or integer vector type.");
4071 ISD = ISD::FMINNUM;
4072 }
4073
4074 static const CostTblEntry SSE1CostTbl[] = {
4075 {ISD::FMINNUM, MVT::v4f32, 1},
4076 };
4077
4078 static const CostTblEntry SSE2CostTbl[] = {
4079 {ISD::FMINNUM, MVT::v2f64, 1},
4080 {ISD::SMIN, MVT::v8i16, 1},
4081 {ISD::UMIN, MVT::v16i8, 1},
4082 };
4083
4084 static const CostTblEntry SSE41CostTbl[] = {
4085 {ISD::SMIN, MVT::v4i32, 1},
4086 {ISD::UMIN, MVT::v4i32, 1},
4087 {ISD::UMIN, MVT::v8i16, 1},
4088 {ISD::SMIN, MVT::v16i8, 1},
4089 };
4090
4091 static const CostTblEntry SSE42CostTbl[] = {
4092 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
4093 };
4094
4095 static const CostTblEntry AVX1CostTbl[] = {
4096 {ISD::FMINNUM, MVT::v8f32, 1},
4097 {ISD::FMINNUM, MVT::v4f64, 1},
4098 {ISD::SMIN, MVT::v8i32, 3},
4099 {ISD::UMIN, MVT::v8i32, 3},
4100 {ISD::SMIN, MVT::v16i16, 3},
4101 {ISD::UMIN, MVT::v16i16, 3},
4102 {ISD::SMIN, MVT::v32i8, 3},
4103 {ISD::UMIN, MVT::v32i8, 3},
4104 };
4105
4106 static const CostTblEntry AVX2CostTbl[] = {
4107 {ISD::SMIN, MVT::v8i32, 1},
4108 {ISD::UMIN, MVT::v8i32, 1},
4109 {ISD::SMIN, MVT::v16i16, 1},
4110 {ISD::UMIN, MVT::v16i16, 1},
4111 {ISD::SMIN, MVT::v32i8, 1},
4112 {ISD::UMIN, MVT::v32i8, 1},
4113 };
4114
4115 static const CostTblEntry AVX512CostTbl[] = {
4116 {ISD::FMINNUM, MVT::v16f32, 1},
4117 {ISD::FMINNUM, MVT::v8f64, 1},
4118 {ISD::SMIN, MVT::v2i64, 1},
4119 {ISD::UMIN, MVT::v2i64, 1},
4120 {ISD::SMIN, MVT::v4i64, 1},
4121 {ISD::UMIN, MVT::v4i64, 1},
4122 {ISD::SMIN, MVT::v8i64, 1},
4123 {ISD::UMIN, MVT::v8i64, 1},
4124 {ISD::SMIN, MVT::v16i32, 1},
4125 {ISD::UMIN, MVT::v16i32, 1},
4126 };
4127
4128 static const CostTblEntry AVX512BWCostTbl[] = {
4129 {ISD::SMIN, MVT::v32i16, 1},
4130 {ISD::UMIN, MVT::v32i16, 1},
4131 {ISD::SMIN, MVT::v64i8, 1},
4132 {ISD::UMIN, MVT::v64i8, 1},
4133 };
4134
4135 // If we have a native MIN/MAX instruction for this type, use it.
4136 if (ST->hasBWI())
4137 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4138 return LT.first * Entry->Cost;
4139
4140 if (ST->hasAVX512())
4141 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4142 return LT.first * Entry->Cost;
4143
4144 if (ST->hasAVX2())
4145 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4146 return LT.first * Entry->Cost;
4147
4148 if (ST->hasAVX())
4149 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4150 return LT.first * Entry->Cost;
4151
4152 if (ST->hasSSE42())
4153 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4154 return LT.first * Entry->Cost;
4155
4156 if (ST->hasSSE41())
4157 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4158 return LT.first * Entry->Cost;
4159
4160 if (ST->hasSSE2())
4161 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4162 return LT.first * Entry->Cost;
4163
4164 if (ST->hasSSE1())
4165 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4166 return LT.first * Entry->Cost;
4167
4168 unsigned CmpOpcode;
4169 if (Ty->isFPOrFPVectorTy()) {
4170 CmpOpcode = Instruction::FCmp;
4171 } else {
4172 assert(Ty->isIntOrIntVectorTy() &&
4173 "expecting floating point or integer type for min/max reduction");
4174 CmpOpcode = Instruction::ICmp;
4175 }
4176
4177 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4178 // Otherwise fall back to cmp+select.
4179 InstructionCost Result =
4180 getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
4181 CostKind) +
4182 getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
4183 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4184 return Result;
4185}
4186
4187InstructionCost
4188X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
4189 bool IsUnsigned,
4190 TTI::TargetCostKind CostKind) {
4191 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4192
4193 MVT MTy = LT.second;
4194
4195 int ISD;
4196 if (ValTy->isIntOrIntVectorTy()) {
4197 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4198 } else {
4199 assert(ValTy->isFPOrFPVectorTy() &&
4200 "Expected floating point or integer vector type.");
4201 ISD = ISD::FMINNUM;
4202 }
4203
4204 // We use the Intel Architecture Code Analyzer (IACA) to measure the
4205 // throughput and use that as the cost.
4206
4207 static const CostTblEntry SSE2CostTblNoPairWise[] = {
4208 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
4209 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
4210 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
4211 };
4212
4213 static const CostTblEntry SSE41CostTblNoPairWise[] = {
4214 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
4215 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
4216 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
4217 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
4218 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
4219 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
4220 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
4221 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
4222 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
4223 {ISD::SMIN, MVT::v16i8, 6},
4224 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
4225 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
4226 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
4227 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
4228 };
4229
4230 static const CostTblEntry AVX1CostTblNoPairWise[] = {
4231 {ISD::SMIN, MVT::v16i16, 6},
4232 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
4233 {ISD::SMIN, MVT::v32i8, 8},
4234 {ISD::UMIN, MVT::v32i8, 8},
4235 };
4236
4237 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4238 {ISD::SMIN, MVT::v32i16, 8},
4239 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4240 {ISD::SMIN, MVT::v64i8, 10},
4241 {ISD::UMIN, MVT::v64i8, 10},
4242 };
4243
4244 // Before legalizing the type, give a chance to look up illegal narrow types
4245 // in the table.
4246 // FIXME: Is there a better way to do this?
4247 EVT VT = TLI->getValueType(DL, ValTy);
4248 if (VT.isSimple()) {
4249 MVT MTy = VT.getSimpleVT();
4250 if (ST->hasBWI())
4251 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4252 return Entry->Cost;
4253
4254 if (ST->hasAVX())
4255 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4256 return Entry->Cost;
4257
4258 if (ST->hasSSE41())
4259 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4260 return Entry->Cost;
4261
4262 if (ST->hasSSE2())
4263 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4264 return Entry->Cost;
4265 }
4266
4267 auto *ValVTy = cast<FixedVectorType>(ValTy);
4268 unsigned NumVecElts = ValVTy->getNumElements();
4269
4270 auto *Ty = ValVTy;
4271 InstructionCost MinMaxCost = 0;
4272 if (LT.first != 1 && MTy.isVector() &&
4273 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4274 // Type needs to be split. We need LT.first - 1 operations.
4275 Ty = FixedVectorType::get(ValVTy->getElementType(),
4276 MTy.getVectorNumElements());
4277 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4278 MTy.getVectorNumElements());
4279 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4280 MinMaxCost *= LT.first - 1;
4281 NumVecElts = MTy.getVectorNumElements();
4282 }
4283
4284 if (ST->hasBWI())
4285 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4286 return MinMaxCost + Entry->Cost;
4287
4288 if (ST->hasAVX())
4289 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4290 return MinMaxCost + Entry->Cost;
4291
4292 if (ST->hasSSE41())
4293 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4294 return MinMaxCost + Entry->Cost;
4295
4296 if (ST->hasSSE2())
4297 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4298 return MinMaxCost + Entry->Cost;
4299
4300 unsigned ScalarSize = ValTy->getScalarSizeInBits();
4301
4302 // Special case power of 2 reductions where the scalar type isn't changed
4303 // by type legalization.
4304 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4305 ScalarSize != MTy.getScalarSizeInBits())
4306 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
4307
4308 // Now handle reduction with the legal type, taking into account size changes
4309 // at each level.
4310 while (NumVecElts > 1) {
4311 // Determine the size of the remaining vector we need to reduce.
4312 unsigned Size = NumVecElts * ScalarSize;
4313 NumVecElts /= 2;
4314 // If we're reducing from 256/512 bits, use an extract_subvector.
4315 if (Size > 128) {
4316 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4317 MinMaxCost +=
4318 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4319 Ty = SubTy;
4320 } else if (Size == 128) {
4321 // Reducing from 128 bits is a permute of v2f64/v2i64.
4322 VectorType *ShufTy;
4323 if (ValTy->isFloatingPointTy())
4324 ShufTy =
4325 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4326 else
4327 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4328 MinMaxCost +=
4329 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4330 } else if (Size == 64) {
4331 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4332 FixedVectorType *ShufTy;
4333 if (ValTy->isFloatingPointTy())
4334 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4335 else
4336 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4337 MinMaxCost +=
4338 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4339 } else {
4340 // Reducing from smaller size is a shift by immediate.
4341 auto *ShiftTy = FixedVectorType::get(
4342 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4343 MinMaxCost += getArithmeticInstrCost(
4344 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4345 TargetTransformInfo::OK_AnyValue,
4346 TargetTransformInfo::OK_UniformConstantValue,
4347 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4348 }
4349
4350 // Add the arithmetic op for this level.
4351 auto *SubCondTy =
4352 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4353 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4354 }
4355
4356 // Add the final extract element to the cost.
4357 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4358}
4359
4360/// Calculate the cost of materializing a 64-bit value. This helper
4361/// method might only calculate a fraction of a larger immediate. Therefore it
4362/// is valid to return a cost of ZERO.
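/// Illustrative values implied by the checks below (not measured):
///   getIntImmCost(0)          -> TCC_Free
///   getIntImmCost(42)         -> TCC_Basic      (fits in a signed 32-bit imm)
///   getIntImmCost(1LL << 40)  -> 2 * TCC_Basic  (needs a full 64-bit move)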
4363InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4364 if (Val == 0)
4365 return TTI::TCC_Free;
4366
4367 if (isInt<32>(Val))
4368 return TTI::TCC_Basic;
4369
4370 return 2 * TTI::TCC_Basic;
4371}
4372
4373InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4374 TTI::TargetCostKind CostKind) {
4375 assert(Ty->isIntegerTy());
4376
4377 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4378 if (BitSize == 0)
4379 return ~0U;
4380
4381 // Never hoist constants larger than 128 bits, because this might lead to
4382 // incorrect code generation or assertions in codegen.
4383 // Fixme: Create a cost model for types larger than i128 once the codegen
4384 // issues have been fixed.
4385 if (BitSize > 128)
4386 return TTI::TCC_Free;
4387
4388 if (Imm == 0)
4389 return TTI::TCC_Free;
4390
4391 // Sign-extend all constants to a multiple of 64-bit.
4392 APInt ImmVal = Imm;
4393 if (BitSize % 64 != 0)
4394 ImmVal = Imm.sext(alignTo(BitSize, 64));
4395
4396 // Split the constant into 64-bit chunks and calculate the cost for each
4397 // chunk.
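 // E.g. (illustrative): an i96 immediate is sign-extended to 128 bits and
 // costed as two 64-bit chunks; chunks that are zero or fit in 32 bits are
 // cheaper, and the final std::max guarantees at least one instruction is
 // charged.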
4398 InstructionCost Cost = 0;
4399 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4400 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4401 int64_t Val = Tmp.getSExtValue();
4402 Cost += getIntImmCost(Val);
4403 }
4404 // We need at least one instruction to materialize the constant.
4405 return std::max<InstructionCost>(1, Cost);
4406}
4407
4408InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4409 const APInt &Imm, Type *Ty,
4410 TTI::TargetCostKind CostKind,
4411 Instruction *Inst) {
4412 assert(Ty->isIntegerTy());
4413
4414 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4415 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4416 // here, so that constant hoisting will ignore this constant.
4417 if (BitSize == 0)
4418 return TTI::TCC_Free;
4419
4420 unsigned ImmIdx = ~0U;
4421 switch (Opcode) {
4422 default:
4423 return TTI::TCC_Free;
4424 case Instruction::GetElementPtr:
4425 // Always hoist the base address of a GetElementPtr. This prevents the
4426 // creation of new constants for every base constant that gets constant
4427 // folded with the offset.
4428 if (Idx == 0)
4429 return 2 * TTI::TCC_Basic;
4430 return TTI::TCC_Free;
4431 case Instruction::Store:
4432 ImmIdx = 0;
4433 break;
4434 case Instruction::ICmp:
4435 // This is an imperfect hack to prevent constant hoisting of
4436 // compares that might be trying to check if a 64-bit value fits in
4437 // 32-bits. The backend can optimize these cases using a right shift by 32.
4438 // Ideally we would check the compare predicate here. There are also other
4439 // similar immediates the backend can use shifts for.
4440 if (Idx == 1 && Imm.getBitWidth() == 64) {
4441 uint64_t ImmVal = Imm.getZExtValue();
4442 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4443 return TTI::TCC_Free;
4444 }
4445 ImmIdx = 1;
4446 break;
4447 case Instruction::And:
4448 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4449 // by using a 32-bit operation with implicit zero extension. Detect such
4450 // immediates here as the normal path expects bit 31 to be sign extended.
4451 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4452 return TTI::TCC_Free;
4453 ImmIdx = 1;
4454 break;
4455 case Instruction::Add:
4456 case Instruction::Sub:
4457 // For add/sub, we can use the opposite instruction for INT32_MIN.
4458 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4459 return TTI::TCC_Free;
4460 ImmIdx = 1;
4461 break;
4462 case Instruction::UDiv:
4463 case Instruction::SDiv:
4464 case Instruction::URem:
4465 case Instruction::SRem:
4466 // Division by constant is typically expanded later into a different
4467 // instruction sequence. This completely changes the constants.
4468 // Report them as "free" to stop ConstantHoist from marking them as opaque.
4469 return TTI::TCC_Free;
4470 case Instruction::Mul:
4471 case Instruction::Or:
4472 case Instruction::Xor:
4473 ImmIdx = 1;
4474 break;
4475 // Always return TCC_Free for the shift value of a shift instruction.
4476 case Instruction::Shl:
4477 case Instruction::LShr:
4478 case Instruction::AShr:
4479 if (Idx == 1)
4480 return TTI::TCC_Free;
4481 break;
4482 case Instruction::Trunc:
4483 case Instruction::ZExt:
4484 case Instruction::SExt:
4485 case Instruction::IntToPtr:
4486 case Instruction::PtrToInt:
4487 case Instruction::BitCast:
4488 case Instruction::PHI:
4489 case Instruction::Call:
4490 case Instruction::Select:
4491 case Instruction::Ret:
4492 case Instruction::Load:
4493 break;
4494 }
4495
4496 if (Idx == ImmIdx) {
4497 int NumConstants = divideCeil(BitSize, 64);
4498 InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4499 return (Cost <= NumConstants * TTI::TCC_Basic)
4500 ? static_cast<int>(TTI::TCC_Free)
4501 : Cost;
4502 }
4503
4504 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4505}
4506
4507InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4508 const APInt &Imm, Type *Ty,
4509 TTI::TargetCostKind CostKind) {
4510 assert(Ty->isIntegerTy());
4511
4512 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4513 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4514 // here, so that constant hoisting will ignore this constant.
4515 if (BitSize == 0)
4516 return TTI::TCC_Free;
4517
4518 switch (IID) {
4519 default:
4520 return TTI::TCC_Free;
4521 case Intrinsic::sadd_with_overflow:
4522 case Intrinsic::uadd_with_overflow:
4523 case Intrinsic::ssub_with_overflow:
4524 case Intrinsic::usub_with_overflow:
4525 case Intrinsic::smul_with_overflow:
4526 case Intrinsic::umul_with_overflow:
4527 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4528 return TTI::TCC_Free;
4529 break;
4530 case Intrinsic::experimental_stackmap:
4531 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4532 return TTI::TCC_Free;
4533 break;
4534 case Intrinsic::experimental_patchpoint_void:
4535 case Intrinsic::experimental_patchpoint_i64:
4536 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4537 return TTI::TCC_Free;
4538 break;
4539 }
4540 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4541}
4542
4543InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4544 TTI::TargetCostKind CostKind,
4545 const Instruction *I) {
4546 if (CostKind != TTI::TCK_RecipThroughput)
4547 return Opcode == Instruction::PHI ? 0 : 1;
4548 // Branches are assumed to be predicted.
4549 return 0;
4550}
4551
4552int X86TTIImpl::getGatherOverhead() const {
4553 // Some CPUs have more overhead for gather. The specified overhead is relative
4554 // to the Load operation. "2" is the number provided by Intel architects. This
4555 // parameter is used for cost estimation of Gather Op and comparison with
4556 // other alternatives.
4557 // TODO: Remove the explicit hasAVX512()? That would mean we would only
4558 // enable gather with a -march.
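 // Sketch of the effect: fast-gather AVX2 and AVX-512 targets see a relative
 // overhead of 2 versus a plain load, while the 1024 returned otherwise is a
 // deliberately prohibitive value that steers the vectorizers away from
 // gathers.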
4559 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4560 return 2;
4561
4562 return 1024;
4563}
4564
4565int X86TTIImpl::getScatterOverhead() const {
4566 if (ST->hasAVX512())
4567 return 2;
4568
4569 return 1024;
4570}
4571
4572 // Return an average cost of a Gather / Scatter instruction; may be improved later.
4573// FIXME: Add TargetCostKind support.
4574InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4575 const Value *Ptr, Align Alignment,
4576 unsigned AddressSpace) {
4577
4578 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4579 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4580
4581 // Try to reduce index size from 64 bit (default for GEP)
4582 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4583 // operation will use 16 x 64 indices which do not fit in a zmm and need
4584 // to be split. Also check that the base pointer is the same for all lanes,
4585 // and that there's at most one variable index.
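 // Illustrative motivation (figures follow from register widths): a v16
 // gather with i64 indices needs 16 x 64 = 1024 index bits, which does not
 // fit in a single 512-bit register and forces a split, whereas i32 indices
 // need only 512 bits and fit in one.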
4586 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4587 unsigned IndexSize = DL.getPointerSizeInBits();
4588 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4589 if (IndexSize < 64 || !GEP)
4590 return IndexSize;
4591
4592 unsigned NumOfVarIndices = 0;
4593 const Value *Ptrs = GEP->getPointerOperand();
4594 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4595 return IndexSize;
4596 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4597 if (isa<Constant>(GEP->getOperand(i)))
4598 continue;
4599 Type *IndxTy = GEP->getOperand(i)->getType();
4600 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4601 IndxTy = IndexVTy->getElementType();
4602 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4603 !isa<SExtInst>(GEP->getOperand(i))) ||
4604 ++NumOfVarIndices > 1)
4605 return IndexSize; // 64
4606 }
4607 return (unsigned)32;
4608 };
4609
4610 // Trying to reduce IndexSize to 32 bits for vector 16.
4611 // By default the IndexSize is equal to pointer size.
4612 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4613 ? getIndexSizeInBits(Ptr, DL)
4614 : DL.getPointerSizeInBits();
4615
4616 auto *IndexVTy = FixedVectorType::get(
4617 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4618 std::pair<InstructionCost, MVT> IdxsLT =
4619 TLI->getTypeLegalizationCost(DL, IndexVTy);
4620 std::pair<InstructionCost, MVT> SrcLT =
4621 TLI->getTypeLegalizationCost(DL, SrcVTy);
4622 InstructionCost::CostType SplitFactor =
4623 *std::max(IdxsLT.first, SrcLT.first).getValue();
4624 if (SplitFactor > 1) {
4625 // Handle splitting of vector of pointers
4626 auto *SplitSrcTy =
4627 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4628 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4629 AddressSpace);
4630 }
4631
4632 // The gather / scatter cost is given by Intel architects. It is a rough
4633 // number since we are looking at one instruction at a time.
4634 const int GSOverhead = (Opcode == Instruction::Load)
4635 ? getGatherOverhead()
4636 : getScatterOverhead();
4637 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4638 MaybeAlign(Alignment), AddressSpace,
4639 TTI::TCK_RecipThroughput);
4640}
4641
4642/// Return the cost of full scalarization of gather / scatter operation.
4643///
4644/// Opcode - Load or Store instruction.
4645/// SrcVTy - The type of the data vector that should be gathered or scattered.
4646/// VariableMask - The mask is non-constant at compile time.
4647/// Alignment - Alignment for one element.
4648/// AddressSpace - pointer[s] address space.
4649///
4650/// FIXME: Add TargetCostKind support.
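/// Rough decomposition of the cost computed below (sketch):
///   (VariableMask ? mask extraction + VF * (icmp + branch) : 0)
///   + VF * scalar load/store cost
///   + VF * insertelement (gather) or extractelement (scatter) cost.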
4651InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4652 bool VariableMask, Align Alignment,
4653 unsigned AddressSpace) {
4654 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4655 APInt DemandedElts = APInt::getAllOnesValue(VF);
4656 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4657
4658 InstructionCost MaskUnpackCost = 0;
4659 if (VariableMask) {
4660 auto *MaskTy =
4661 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4662 MaskUnpackCost =
4663 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4664 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4665 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4666 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4667 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4668 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4669 }
4670
4671 // The cost of the scalar loads/stores.
4672 InstructionCost MemoryOpCost =
4673 VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4674 MaybeAlign(Alignment), AddressSpace, CostKind);
4675
4676 InstructionCost InsertExtractCost = 0;
4677 if (Opcode == Instruction::Load)
4678 for (unsigned i = 0; i < VF; ++i)
4679 // Add the cost of inserting each scalar load into the vector
4680 InsertExtractCost +=
4681 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4682 else
4683 for (unsigned i = 0; i < VF; ++i)
4684 // Add the cost of extracting each element out of the data vector
4685 InsertExtractCost +=
4686 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4687
4688 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4689}
4690
4691/// Calculate the cost of Gather / Scatter operation
4692InstructionCost X86TTIImpl::getGatherScatterOpCost(
4693 unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4694 Align Alignment, TTI::TargetCostKind CostKind,
4695 const Instruction *I = nullptr) {
4696 if (CostKind != TTI::TCK_RecipThroughput) {
4697 if ((Opcode == Instruction::Load &&
4698 isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4699 (Opcode == Instruction::Store &&
4700 isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4701 return 1;
4702 return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4703 Alignment, CostKind, I);
4704 }
4705
4706 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4707 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4708 if (!PtrTy && Ptr->getType()->isVectorTy())
4709 PtrTy = dyn_cast<PointerType>(
4710 cast<VectorType>(Ptr->getType())->getElementType());
4711 assert(PtrTy && "Unexpected type for Ptr argument");
4712 unsigned AddressSpace = PtrTy->getAddressSpace();
4713
4714 if ((Opcode == Instruction::Load &&
4715 !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4716 (Opcode == Instruction::Store &&
4717 !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4718 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4719 AddressSpace);
4720
4721 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4722}
4723
4724bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4725 TargetTransformInfo::LSRCost &C2) {
4726 // X86-specific: the instruction count gets first priority here.
4727 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4728 C1.NumIVMuls, C1.NumBaseAdds,
4729 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4730 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4731 C2.NumIVMuls, C2.NumBaseAdds,
4732 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4733}
4734
4735bool X86TTIImpl::canMacroFuseCmp() {
4736 return ST->hasMacroFusion() || ST->hasBranchFusion();
4737}
4738
4739bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4740 if (!ST->hasAVX())
4741 return false;
4742
4743 // The backend can't handle a single element vector.
4744 if (isa<VectorType>(DataTy) &&
4745 cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4746 return false;
4747 Type *ScalarTy = DataTy->getScalarType();
4748
4749 if (ScalarTy->isPointerTy())
4750 return true;
4751
4752 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4753 return true;
4754
4755 if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
4756 return true;
4757
4758 if (!ScalarTy->isIntegerTy())
4759 return false;
4760
4761 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4762 return IntWidth == 32 || IntWidth == 64 ||
4763 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4764}
4765
4766bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4767 return isLegalMaskedLoad(DataType, Alignment);
4768}
4769
4770bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4771 unsigned DataSize = DL.getTypeStoreSize(DataType);
4772 // The only supported nontemporal loads are for aligned vectors of 16 or 32
4773 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
4774 // (the equivalent stores only require AVX).
4775 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4776 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
4777
4778 return false;
4779}
4780
4781bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4782 unsigned DataSize = DL.getTypeStoreSize(DataType);
4783
4784 // SSE4A supports nontemporal stores of float and double at arbitrary
4785 // alignment.
4786 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4787 return true;
4788
4789 // Besides the SSE4A subtarget exception above, only aligned stores are
4790 // available nontemporally on any other subtarget. And only stores with a size
4791 // of 4..32 bytes (powers of 2, only) are permitted.
4792 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4793 !isPowerOf2_32(DataSize))
4794 return false;
4795
4796 // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4797 // loads require AVX2).
4798 if (DataSize == 32)
4799 return ST->hasAVX();
4800 else if (DataSize == 16)
4801 return ST->hasSSE1();
4802 return true;
4803}
4804
4805bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4806 if (!isa<VectorType>(DataTy))
4807 return false;
4808
4809 if (!ST->hasAVX512())
4810 return false;
4811
4812 // The backend can't handle a single element vector.
4813 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4814 return false;
4815
4816 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4817
4818 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4819 return true;
4820
4821 if (!ScalarTy->isIntegerTy())
4822 return false;
4823
4824 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4825 return IntWidth == 32 || IntWidth == 64 ||
4826 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4827}
4828
4829bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4830 return isLegalMaskedExpandLoad(DataTy);
4831}
4832
4833bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4834 // Some CPUs have better gather performance than others.
4835 // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4836 // enable gather with a -march.
4837 if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4838 return false;
4839
4840 // This function is called now in two cases: from the Loop Vectorizer
4841 // and from the Scalarizer.
4842 // When the Loop Vectorizer asks about legality of the feature,
4843 // the vectorization factor is not calculated yet. The Loop Vectorizer
4844 // sends a scalar type and the decision is based on the width of the
4845 // scalar element.
4846 // Later on, the cost model will estimate usage of this intrinsic based on
4847 // the vector type.
4848 // The Scalarizer asks again about legality. It sends a vector type.
4849 // In this case we can reject non-power-of-2 vectors.
4850 // We also reject single element vectors as the type legalizer can't
4851 // scalarize it.
4852 if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4853 unsigned NumElts = DataVTy->getNumElements();
4854 if (NumElts == 1)
4855 return false;
4856 // Gather / Scatter for vector 2 is not profitable on KNL / SKX
4857 // Vector-4 of gather/scatter instruction does not exist on KNL.
4858 // We can extend it to 8 elements, but zeroing upper bits of
4859 // the mask vector will add more instructions. Right now we give the scalar
4860 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
4861 // instruction is better in the VariableMask case.
4862 if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
4863 return false;
4864 }
4865 Type *ScalarTy = DataTy->getScalarType();
4866 if (ScalarTy->isPointerTy())
4867 return true;
4868
4869 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4870 return true;
4871
4872 if (!ScalarTy->isIntegerTy())
4873 return false;
4874
4875 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4876 return IntWidth == 32 || IntWidth == 64;
4877}
4878
4879bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4880 // AVX2 doesn't support scatter
4881 if (!ST->hasAVX512())
4882 return false;
4883 return isLegalMaskedGather(DataType, Alignment);
4884}
4885
4886bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4887 EVT VT = TLI->getValueType(DL, DataType);
4888 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4889}
4890
4891bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4892 return false;
4893}
4894
4895bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4896 const Function *Callee) const {
4897 const TargetMachine &TM = getTLI()->getTargetMachine();
4898
4899 // Work this as a subsetting of subtarget features.
4900 const FeatureBitset &CallerBits =
4901 TM.getSubtargetImpl(*Caller)->getFeatureBits();
4902 const FeatureBitset &CalleeBits =
4903 TM.getSubtargetImpl(*Callee)->getFeatureBits();
4904
4905 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4906 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4907 return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4908}
4909
4910bool X86TTIImpl::areFunctionArgsABICompatible(
4911 const Function *Caller, const Function *Callee,
4912 SmallPtrSetImpl<Argument *> &Args) const {
4913 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4914 return false;
4915
4916 // If we get here, we know the target features match. If one function
4917 // considers 512-bit vectors legal and the other does not, consider them
4918 // incompatible.
4919 const TargetMachine &TM = getTLI()->getTargetMachine();
4920
4921 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4922 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4923 return true;
4924
4925 // Consider the arguments compatible if they aren't vectors or aggregates.
4926 // FIXME: Look at the size of vectors.
4927 // FIXME: Look at the element types of aggregates to see if there are vectors.
4928 // FIXME: The API of this function seems intended to allow arguments
4929 // to be removed from the set, but the caller doesn't check if the set
4930 // becomes empty so that may not work in practice.
4931 return llvm::none_of(Args, [](Argument *A) {
4932 auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4933 return EltTy->isVectorTy() || EltTy->isAggregateType();
4934 });
4935}
4936
4937X86TTIImpl::TTI::MemCmpExpansionOptions
4938X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4939 TTI::MemCmpExpansionOptions Options;
4940 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4941 Options.NumLoadsPerBlock = 2;
4942 // All GPR and vector loads can be unaligned.
4943 Options.AllowOverlappingLoads = true;
4944 if (IsZeroCmp) {
4945 // Only enable vector loads for equality comparison. Right now the vector
4946 // version is not as fast for three way compare (see #33329).
4947 const unsigned PreferredWidth = ST->getPreferVectorWidth();
4948 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4949 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4950 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4951 }
4952 if (ST->is64Bit()) {
4953 Options.LoadSizes.push_back(8);
4954 }
4955 Options.LoadSizes.push_back(4);
4956 Options.LoadSizes.push_back(2);
4957 Options.LoadSizes.push_back(1);
4958 return Options;
4959}
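As a rough illustration of how the load-size list above ends up ordered, the following standalone sketch rebuilds it from plain booleans standing in for the subtarget queries; the feature combination mentioned in the final comment is an assumed example, not taken from the report:

  #include <vector>

  std::vector<unsigned> loadSizesSketch(bool HasAVX512, bool HasAVX, bool HasSSE2,
                                        bool Is64Bit, bool IsZeroCmp,
                                        unsigned PreferredWidth) {
    std::vector<unsigned> Sizes;
    if (IsZeroCmp) {                      // vector loads only for equality compares
      if (PreferredWidth >= 512 && HasAVX512) Sizes.push_back(64);
      if (PreferredWidth >= 256 && HasAVX)    Sizes.push_back(32);
      if (PreferredWidth >= 128 && HasSSE2)   Sizes.push_back(16);
    }
    if (Is64Bit) Sizes.push_back(8);      // GPR-sized loads
    Sizes.push_back(4);
    Sizes.push_back(2);
    Sizes.push_back(1);
    return Sizes;                         // e.g. {32, 16, 8, 4, 2, 1} for a
  }                                       // hypothetical 64-bit AVX2 zero-compare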
4960
4961bool X86TTIImpl::enableInterleavedAccessVectorization() {
4962 // TODO: We expect this to be beneficial regardless of arch,
4963 // but there are currently some unexplained performance artifacts on Atom.
4964 // As a temporary solution, disable on Atom.
4965 return !(ST->isAtom());
4966}
4967
4968// Get estimation for interleaved load/store operations for AVX2.
4969// \p Factor is the interleaved-access factor (stride) - number of
4970// (interleaved) elements in the group.
4971// \p Indices contains the indices for a strided load: when the
4972// interleaved load has gaps they indicate which elements are used.
4973// If Indices is empty (or if the number of indices is equal to the size
4974// of the interleaved-access as given in \p Factor) the access has no gaps.
4975//
4976// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
4977// computing the cost using a generic formula as a function of generic
4978// shuffles. We therefore use a lookup table instead, filled according to
4979// the instruction sequences that codegen currently generates.
4980InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
4981 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4982 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4983 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4984
4985 if (UseMaskForCond || UseMaskForGaps)
4. Assuming 'UseMaskForCond' is false
5. Assuming 'UseMaskForGaps' is false
4986 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4987 Alignment, AddressSpace, CostKind,
4988 UseMaskForCond, UseMaskForGaps);
4989
4990 // We currently support only fully-interleaved groups, with no gaps.
4991 // TODO: Support also strided loads (interleaved-groups with gaps).
4992 if (Indices.size() && Indices.size() != Factor)
6. Assuming the condition is true
7. Assuming the condition is true
8. Taking true branch
4993 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
9. Calling 'BasicTTIImplBase::getInterleavedMemoryOpCost'
4994 Alignment, AddressSpace, CostKind);
4995
4996 // VecTy for interleave memop is <VF*Factor x Elt>.
4997 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4998 // VecTy = <12 x i32>.
4999 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5000
5001 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
5002 // the VF=2, while v2i128 is an unsupported MVT vector type
5003 // (see MachineValueType.h::getVectorVT()).
5004 if (!LegalVT.isVector())
5005 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5006 Alignment, AddressSpace, CostKind);
5007
5008 unsigned VF = VecTy->getNumElements() / Factor;
5009 Type *ScalarTy = VecTy->getElementType();
5010 // Deduplicate entries, model floats/pointers as appropriately-sized integers.
5011 if (!ScalarTy->isIntegerTy())
5012 ScalarTy =
5013 Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
5014
5015 // Get the cost of all the memory operations.
5016 InstructionCost MemOpCosts = getMemoryOpCost(
5017 Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5018
5019 auto *VT = FixedVectorType::get(ScalarTy, VF);
5020 EVT ETy = TLI->getValueType(DL, VT);
5021 if (!ETy.isSimple())
5022 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5023 Alignment, AddressSpace, CostKind);
5024
5025 // TODO: Complete for other data-types and strides.
5026 // Each combination of Stride, element bit width and VF results in a different
5027 // sequence; The cost tables are therefore accessed with:
5028 // Factor (stride) and VectorType=VFxiN.
5029 // The Cost accounts only for the shuffle sequence;
5030 // The cost of the loads/stores is accounted for separately.
5031 //
5032 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
5033 {2, MVT::v4i64, 6}, // (load 8i64 and) deinterleave into 2 x 4i64
5034
5035 {3, MVT::v2i8, 10}, // (load 6i8 and) deinterleave into 3 x 2i8
5036 {3, MVT::v4i8, 4}, // (load 12i8 and) deinterleave into 3 x 4i8
5037 {3, MVT::v8i8, 9}, // (load 24i8 and) deinterleave into 3 x 8i8
5038 {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
5039 {3, MVT::v32i8, 13}, // (load 96i8 and) deinterleave into 3 x 32i8
5040
5041 {3, MVT::v8i32, 17}, // (load 24i32 and) deinterleave into 3 x 8i32
5042
5043 {4, MVT::v2i8, 12}, // (load 8i8 and) deinterleave into 4 x 2i8
5044 {4, MVT::v4i8, 4}, // (load 16i8 and) deinterleave into 4 x 4i8
5045 {4, MVT::v8i8, 20}, // (load 32i8 and) deinterleave into 4 x 8i8
5046 {4, MVT::v16i8, 39}, // (load 64i8 and) deinterleave into 4 x 16i8
5047 {4, MVT::v32i8, 80}, // (load 128i8 and) deinterleave into 4 x 32i8
5048
5049 {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
5050 };
5051
5052 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
5053 {2, MVT::v4i64, 6}, // interleave 2 x 4i64 into 8i64 (and store)
5054
5055 {3, MVT::v2i8, 7}, // interleave 3 x 2i8 into 6i8 (and store)
5056 {3, MVT::v4i8, 8}, // interleave 3 x 4i8 into 12i8 (and store)
5057 {3, MVT::v8i8, 11}, // interleave 3 x 8i8 into 24i8 (and store)
5058 {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
5059 {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)
5060
5061 {4, MVT::v2i8, 12}, // interleave 4 x 2i8 into 8i8 (and store)
5062 {4, MVT::v4i8, 9}, // interleave 4 x 4i8 into 16i8 (and store)
5063 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
5064 {4, MVT::v16i8, 10}, // interleave 4 x 16i8 into 64i8 (and store)
5065 {4, MVT::v32i8, 12} // interleave 4 x 32i8 into 128i8 (and store)
5066 };
5067
5068 if (Opcode == Instruction::Load) {
5069 if (const auto *Entry =
5070 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
5071 return MemOpCosts + Entry->Cost;
5072 } else {
5073 assert(Opcode == Instruction::Store &&
5074 "Expected Store Instruction at this point");
5075 if (const auto *Entry =
5076 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
5077 return MemOpCosts + Entry->Cost;
5078 }
5079
5080 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5081 Alignment, AddressSpace, CostKind);
5082}
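To show how the AVX2 path composes its result, here is a standalone model of the table lookup above; the reduced VT enum, the sentinel return value, and the memory-cost number in the final check are all illustrative assumptions:

  // Reduced model of CostTableLookup: keys are (Factor, legalized element
  // type), the table value is only the shuffle-sequence cost, and the
  // load/store cost is added on top by the caller.
  enum class VT { v8i32, v16i8, v32i8 };
  struct Entry { unsigned Factor; VT Ty; unsigned Cost; };

  constexpr Entry LoadTbl[] = {
      {3, VT::v8i32, 17}, // mirrors {3, MVT::v8i32, 17} above
      {3, VT::v16i8, 11},
      {3, VT::v32i8, 13},
  };

  constexpr int lookupShuffleCost(unsigned Factor, VT Ty) {
    for (const Entry &E : LoadTbl)
      if (E.Factor == Factor && E.Ty == Ty)
        return static_cast<int>(E.Cost);
    return -1; // no entry: the real code falls back to the base implementation
  }

  // Factor = 3 load of <24 x i32>, with a hypothetical memory cost of 3:
  static_assert(lookupShuffleCost(3, VT::v8i32) + 3 == 20,
                "shuffle-sequence cost plus memory cost");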
5083
5084// Get estimation for interleaved load/store operations and strided load.
5085// \p Indices contains indices for strided load.
5086// \p Factor - the factor of interleaving.
5087// AVX-512 provides 3-src shuffles that significantly reduces the cost.
5088InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
5089 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
5090 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
5091 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
5092
5093 if (UseMaskForCond || UseMaskForGaps)
5094 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5095 Alignment, AddressSpace, CostKind,
5096 UseMaskForCond, UseMaskForGaps);
5097
5098 // VecTy for interleave memop is <VF*Factor x Elt>.
5099 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5100 // VecTy = <12 x i32>.
5101
5102 // Calculate the number of memory operations (NumOfMemOps), required
5103 // for load/store the VecTy.
5104 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5105 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
5106 unsigned LegalVTSize = LegalVT.getStoreSize();
5107 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
5108
5109 // Get the cost of one memory operation.
5110 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
5111 LegalVT.getVectorNumElements());
5112 InstructionCost MemOpCost = getMemoryOpCost(
5113 Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5114
5115 unsigned VF = VecTy->getNumElements() / Factor;
5116 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
5117
5118 if (Opcode == Instruction::Load) {
5119 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
5120 // contain the cost of the optimized shuffle sequence that the
5121 // X86InterleavedAccess pass will generate.
5122 // The cost of loads and stores are computed separately from the table.
5123
5124 // X86InterleavedAccess supports only the following interleaved-access groups.
5125 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5126 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
5127 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
5128 {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
5129 };
5130
5131 if (const auto *Entry =
5132 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
5133 return NumOfMemOps * MemOpCost + Entry->Cost;
5134 // If an entry does not exist, fall back to the default implementation.
5135
5136 // Kind of shuffle depends on number of loaded values.
5137 // If we load the entire data in one register, we can use a 1-src shuffle.
5138 // Otherwise, we'll merge 2 sources in each operation.
5139 TTI::ShuffleKind ShuffleKind =
5140 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
5141
5142 InstructionCost ShuffleCost =
5143 getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
5144
5145 unsigned NumOfLoadsInInterleaveGrp =
5146 Indices.size() ? Indices.size() : Factor;
5147 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
5148 VecTy->getNumElements() / Factor);
5149 InstructionCost NumOfResults =
5150 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
5151 NumOfLoadsInInterleaveGrp;
5152
5153 // About half of the loads may be folded into shuffles when we have only
5154 // one result. If we have more than one result, we do not fold loads at all.
5155 unsigned NumOfUnfoldedLoads =
5156 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
5157
5158 // Get a number of shuffle operations per result.
5159 unsigned NumOfShufflesPerResult =
5160 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5161
5162 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
5163 // When we have more than one destination, we need additional instructions
5164 // to preserve the sources.
5165 InstructionCost NumOfMoves = 0;
5166 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
5167 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
5168
5169 InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
5170 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
5171
5172 return Cost;
5173 }
5174
5175 // Store.
5176 assert(Opcode == Instruction::Store &&
5177 "Expected Store Instruction at this point");
5178 // X86InterleavedAccess supports only the following interleaved-access groups.
5179 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
5180 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
5181 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
5182 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
5183
5184 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
5185 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
5186 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5187 {4, MVT::v64i8, 24} // interleave 4 x 64i8 into 256i8 (and store)
5188 };
5189
5190 if (const auto *Entry =
5191 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
5192 return NumOfMemOps * MemOpCost + Entry->Cost;
5193 // If an entry does not exist, fall back to the default implementation.
5194
5195 // There are no strided stores for now, and a store can't be folded into a
5196 // shuffle.
5197 unsigned NumOfSources = Factor; // The number of values to be merged.
5198 InstructionCost ShuffleCost =
5199 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
5200 unsigned NumOfShufflesPerStore = NumOfSources - 1;
5201
5202 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
5203 // We need additional instructions to preserve the sources.
5204 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
5205 InstructionCost Cost =
5206 NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
5207 NumOfMoves;
5208 return Cost;
5209}
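The fallback load path above combines several derived quantities; the following standalone sketch restates that arithmetic with plain unsigned values in place of InstructionCost. Every input in the final check is an assumed number chosen only to show how the terms add up, and the sketch assumes the two-source shuffle case (more than one memory op):

  constexpr unsigned ceilDiv(unsigned A, unsigned B) { return (A + B - 1) / B; }

  constexpr unsigned interleavedLoadCostSketch(unsigned VecTySize,
                                               unsigned LegalVTSize,
                                               unsigned NumOfResults,
                                               unsigned ShuffleCost,
                                               unsigned MemOpCost) {
    const unsigned NumOfMemOps = ceilDiv(VecTySize, LegalVTSize);
    // Half of the loads may fold into shuffles when there is a single result.
    const unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
    const unsigned NumOfShufflesPerResult =
        NumOfMemOps > 1 ? NumOfMemOps - 1 : 1;
    // Extra moves to preserve the sources of two-source shuffles.
    const unsigned NumOfMoves =
        NumOfResults > 1 ? NumOfResults * NumOfShufflesPerResult / 2 : 0;
    return NumOfResults * NumOfShufflesPerResult * ShuffleCost +
           NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
  }

  // 128-byte group, 64-byte legal vectors, 3 results, unit shuffle/load costs:
  // NumOfMemOps = 2, unfolded loads = 2, shuffles per result = 1, moves = 1,
  // so the estimate is 3*1*1 + 2*1 + 1 = 6.
  static_assert(interleavedLoadCostSketch(128, 64, 3, 1, 1) == 6,
                "shuffles + unfolded loads + moves");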
5210
5211InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
5212 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
5213 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5214 bool UseMaskForCond, bool UseMaskForGaps) {
5215 auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
5216 Type *EltTy = cast<VectorType>(VecTy)->getElementType();
5217 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
5218 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
5219 return true;
5220 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
5221 (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
5222 return HasBW;
5223 return false;
5224 };
5225 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
5226 return getInterleavedMemoryOpCostAVX512(
5227 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
5228 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5229 if (ST->hasAVX2())
1. Taking true branch
5230 return getInterleavedMemoryOpCostAVX2(
3. Calling 'X86TTIImpl::getInterleavedMemoryOpCostAVX2'
5231 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
2. 'VecTy' is a 'FixedVectorType'
5232 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5233
5234 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5235 Alignment, AddressSpace, CostKind,
5236 UseMaskForCond, UseMaskForGaps);
5237}

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/BasicTTIImpl.h

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/OptimizationRemarkEmitter.h"
26#include "llvm/Analysis/TargetTransformInfo.h"
27#include "llvm/Analysis/TargetTransformInfoImpl.h"
28#include "llvm/CodeGen/ISDOpcodes.h"
29#include "llvm/CodeGen/TargetLowering.h"
30#include "llvm/CodeGen/TargetSubtargetInfo.h"
31#include "llvm/CodeGen/ValueTypes.h"
32#include "llvm/IR/BasicBlock.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Instruction.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Type.h"
43#include "llvm/IR/Value.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/CommandLine.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MachineValueType.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Target/TargetMachine.h"
50#include <algorithm>
51#include <cassert>
52#include <cstdint>
53#include <limits>
54#include <utility>
55
56namespace llvm {
57
58class Function;
59class GlobalValue;
60class LLVMContext;
61class ScalarEvolution;
62class SCEV;
63class TargetMachine;
64
65extern cl::opt<unsigned> PartialUnrollingThreshold;
66
67/// Base class which can be used to help build a TTI implementation.
68///
69/// This class provides as much implementation of the TTI interface as is
70/// possible using the target independent parts of the code generator.
71///
72/// In order to subclass it, your class must implement a getST() method to
73/// return the subtarget, and a getTLI() method to return the target lowering.
74/// We need these methods implemented in the derived class so that this class
75/// doesn't have to duplicate storage for them.
76template <typename T>
77class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
78private:
79 using BaseT = TargetTransformInfoImplCRTPBase<T>;
80 using TTI = TargetTransformInfo;
81
82 /// Helper function to access this as a T.
83 T *thisT() { return static_cast<T *>(this); }
84
85 /// Estimate a cost of Broadcast as an extract and sequence of insert
86 /// operations.
87 InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy) {
88 InstructionCost Cost = 0;
89 // Broadcast cost is equal to the cost of extracting the zero'th element
90 // plus the cost of inserting it into every element of the result vector.
91 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);
92
93 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
94 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
95 }
96 return Cost;
97 }
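As a quick illustration of the estimate above: one extract of lane 0 plus one insert per destination lane, here with assumed unit per-lane costs:

  constexpr unsigned broadcastOverheadSketch(unsigned NumElts,
                                             unsigned ExtractCost,
                                             unsigned InsertCost) {
    // One extract of element 0, then an insert into every result lane.
    return ExtractCost + NumElts * InsertCost;
  }
  static_assert(broadcastOverheadSketch(8, 1, 1) == 9,
                "1 extract + 8 inserts for an 8-lane broadcast");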
98
99 /// Estimate a cost of shuffle as a sequence of extract and insert
100 /// operations.
101 InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy) {
102 InstructionCost Cost = 0;
103 // Shuffle cost is equal to the cost of extracting elements from the source
104 // vectors plus the cost of inserting them into the result vector.
105
106 // e.g. <4 x float> with a mask of <0,5,2,7>: we need to extract from
107 // index 0 of the first vector, index 1 of the second vector, index 2 of the
108 // first vector, and finally index 3 of the second vector, and insert them at
109 // indices <0,1,2,3> of the result vector.
110 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
111 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
112 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
113 }
114 return Cost;
115 }
116
117 /// Estimate a cost of subvector extraction as a sequence of extract and
118 /// insert operations.
119 InstructionCost getExtractSubvectorOverhead(VectorType *VTy, int Index,
120 FixedVectorType *SubVTy) {
121 assert(VTy && SubVTy &&
122 "Can only extract subvectors from vectors");
123 int NumSubElts = SubVTy->getNumElements();
124 assert((!isa<FixedVectorType>(VTy) ||
125 (Index + NumSubElts) <=
126 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
127 "SK_ExtractSubvector index out of range");
128
129 InstructionCost Cost = 0;
130 // Subvector extraction cost is equal to the cost of extracting each element
131 // from the source type plus the cost of inserting it into the result vector
132 // type.
133 for (int i = 0; i != NumSubElts; ++i) {
134 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
135 i + Index);
136 Cost +=
137 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
138 }
139 return Cost;
140 }
141
142 /// Estimate a cost of subvector insertion as a sequence of extract and
143 /// insert operations.
144 InstructionCost getInsertSubvectorOverhead(VectorType *VTy, int Index,
145 FixedVectorType *SubVTy) {
146 assert(VTy && SubVTy &&
147 "Can only insert subvectors into vectors");
148 int NumSubElts = SubVTy->getNumElements();
149 assert((!isa<FixedVectorType>(VTy) ||
150 (Index + NumSubElts) <=
151 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
152 "SK_InsertSubvector index out of range");
153
154 InstructionCost Cost = 0;
155 // Subvector insertion cost is equal to the cost of extracting each element
156 // from the subvector plus the cost of inserting it into the result vector
157 // type.
158 for (int i = 0; i != NumSubElts; ++i) {
159 Cost +=
160 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
161 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
162 i + Index);
163 }
164 return Cost;
165 }
166
167 /// Local query method delegates up to T which *must* implement this!
168 const TargetSubtargetInfo *getST() const {
169 return static_cast<const T *>(this)->getST();
170 }
171
172 /// Local query method delegates up to T which *must* implement this!
173 const TargetLoweringBase *getTLI() const {
174 return static_cast<const T *>(this)->getTLI();
175 }
176
177 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
178 switch (M) {
179 case TTI::MIM_Unindexed:
180 return ISD::UNINDEXED;
181 case TTI::MIM_PreInc:
182 return ISD::PRE_INC;
183 case TTI::MIM_PreDec:
184 return ISD::PRE_DEC;
185 case TTI::MIM_PostInc:
186 return ISD::POST_INC;
187 case TTI::MIM_PostDec:
188 return ISD::POST_DEC;
189 }
190 llvm_unreachable("Unexpected MemIndexedMode");
191 }
192
193 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
194 Align Alignment,
195 bool VariableMask,
196 bool IsGatherScatter,
197 TTI::TargetCostKind CostKind) {
198 auto *VT = cast<FixedVectorType>(DataTy);
199 // Assume the target does not have support for gather/scatter operations
200 // and provide a rough estimate.
201 //
202 // First, compute the cost of the individual memory operations.
203 InstructionCost AddrExtractCost =
204 IsGatherScatter
205 ? getVectorInstrCost(Instruction::ExtractElement,
206 FixedVectorType::get(
207 PointerType::get(VT->getElementType(), 0),
208 VT->getNumElements()),
209 -1)
210 : 0;
211 InstructionCost LoadCost =
212 VT->getNumElements() *
213 (AddrExtractCost +
214 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
215
216 // Next, compute the cost of packing the result in a vector.
217 InstructionCost PackingCost = getScalarizationOverhead(
218 VT, Opcode != Instruction::Store, Opcode == Instruction::Store);
219
220 InstructionCost ConditionalCost = 0;
221 if (VariableMask) {
222 // Compute the cost of conditionally executing the memory operations with
223 // variable masks. This includes extracting the individual conditions, plus the
224 // branches and PHIs needed to combine the results.
225 // NOTE: Estimating the cost of conditionally executing the memory
226 // operations accurately is quite difficult and the current solution
227 // provides a very rough estimate only.
228 ConditionalCost =
229 VT->getNumElements() *
230 (getVectorInstrCost(
231 Instruction::ExtractElement,
232 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
233 VT->getNumElements()),
234 -1) +
235 getCFInstrCost(Instruction::Br, CostKind) +
236 getCFInstrCost(Instruction::PHI, CostKind));
237 }
238
239 return LoadCost + PackingCost + ConditionalCost;
240 }
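The rough estimate above is a sum of three buckets: the scalarized memory operations, the packing or unpacking of the vector value, and, for variable masks, the per-lane control flow. A standalone restatement with every per-lane cost passed in as an assumed plain number:

  constexpr unsigned maskedMemOpSketch(unsigned NumElts, bool IsGatherScatter,
                                       bool VariableMask, unsigned AddrExtract,
                                       unsigned ScalarMemOp, unsigned Packing,
                                       unsigned MaskExtract, unsigned Branch,
                                       unsigned Phi) {
    // Per-lane address extraction only applies to gathers/scatters.
    const unsigned PerLaneAddr = IsGatherScatter ? AddrExtract : 0;
    const unsigned MemCost = NumElts * (PerLaneAddr + ScalarMemOp);
    // Per-lane mask extract, branch and PHI only with a variable mask.
    const unsigned CondCost =
        VariableMask ? NumElts * (MaskExtract + Branch + Phi) : 0;
    return MemCost + Packing + CondCost;
  }
  // 4 lanes, gather, variable mask, unit costs, packing cost of 4:
  // 4*(1+1) + 4 + 4*(1+1+1) = 24.
  static_assert(maskedMemOpSketch(4, true, true, 1, 1, 4, 1, 1, 1) == 24,
                "memory + packing + conditional cost");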
241
242protected:
243 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
244 : BaseT(DL) {}
245 virtual ~BasicTTIImplBase() = default;
246
247 using TargetTransformInfoImplBase::DL;
248
249public:
250 /// \name Scalar TTI Implementations
251 /// @{
252 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
253 unsigned AddressSpace, Align Alignment,
254 bool *Fast) const {
255 EVT E = EVT::getIntegerVT(Context, BitWidth);
256 return getTLI()->allowsMisalignedMemoryAccesses(
257 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
258 }
259
260 bool hasBranchDivergence() { return false; }
261
262 bool useGPUDivergenceAnalysis() { return false; }
263
264 bool isSourceOfDivergence(const Value *V) { return false; }
265
266 bool isAlwaysUniform(const Value *V) { return false; }
267
268 unsigned getFlatAddressSpace() {
269 // Return an invalid address space.
270 return -1;
271 }
272
273 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
274 Intrinsic::ID IID) const {
275 return false;
276 }
277
278 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
279 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
280 }
281
282 unsigned getAssumedAddrSpace(const Value *V) const {
283 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
284 }
285
286 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
287 Value *NewV) const {
288 return nullptr;
289 }
290
291 bool isLegalAddImmediate(int64_t imm) {
292 return getTLI()->isLegalAddImmediate(imm);
293 }
294
295 bool isLegalICmpImmediate(int64_t imm) {
296 return getTLI()->isLegalICmpImmediate(imm);
297 }
298
299 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
300 bool HasBaseReg, int64_t Scale,
301 unsigned AddrSpace, Instruction *I = nullptr) {
302 TargetLoweringBase::AddrMode AM;
303 AM.BaseGV = BaseGV;
304 AM.BaseOffs = BaseOffset;
305 AM.HasBaseReg = HasBaseReg;
306 AM.Scale = Scale;
307 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
308 }
309
310 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
311 const DataLayout &DL) const {
312 EVT VT = getTLI()->getValueType(DL, Ty);
313 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
314 }
315
316 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
317 const DataLayout &DL) const {
318 EVT VT = getTLI()->getValueType(DL, Ty);
319 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
320 }
321
322 bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
323 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
324 }
325
326 bool isNumRegsMajorCostOfLSR() {
327 return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
328 }
329
330 bool isProfitableLSRChainElement(Instruction *I) {
331 return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
332 }
333
334 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
335 int64_t BaseOffset, bool HasBaseReg,
336 int64_t Scale, unsigned AddrSpace) {
337 TargetLoweringBase::AddrMode AM;
338 AM.BaseGV = BaseGV;
339 AM.BaseOffs = BaseOffset;
340 AM.HasBaseReg = HasBaseReg;
341 AM.Scale = Scale;
342 return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
343 }
344
345 bool isTruncateFree(Type *Ty1, Type *Ty2) {
346 return getTLI()->isTruncateFree(Ty1, Ty2);
347 }
348
349 bool isProfitableToHoist(Instruction *I) {
350 return getTLI()->isProfitableToHoist(I);
351 }
352
353 bool useAA() const { return getST()->useAA(); }
354
355 bool isTypeLegal(Type *Ty) {
356 EVT VT = getTLI()->getValueType(DL, Ty);
357 return getTLI()->isTypeLegal(VT);
358 }
359
360 InstructionCost getRegUsageForType(Type *Ty) {
361 InstructionCost Val = getTLI()->getTypeLegalizationCost(DL, Ty).first;
362 assert(Val >= 0 && "Negative cost!");
363 return Val;
364 }
365
366 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
367 ArrayRef<const Value *> Operands) {
368 return BaseT::getGEPCost(PointeeType, Ptr, Operands);
369 }
370
371 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
372 unsigned &JumpTableSize,
373 ProfileSummaryInfo *PSI,
374 BlockFrequencyInfo *BFI) {
375 /// Try to find the estimated number of clusters. Note that the number of
376 /// clusters identified in this function could be different from the actual
377 /// numbers found in lowering. This function ignores switches that are
378 /// lowered with a mix of jump table / bit test / BTree. This function was
379 /// initially intended to be used when estimating the cost of a switch in the
380 /// inline cost heuristic, but it's a generic cost model to be used in other
381 /// places (e.g., in loop unrolling).
382 unsigned N = SI.getNumCases();
383 const TargetLoweringBase *TLI = getTLI();
384 const DataLayout &DL = this->getDataLayout();
385
386 JumpTableSize = 0;
387 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
388
389 // Early exit if both a jump table and bit test are not allowed.
390 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
391 return N;
392
393 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
394 APInt MinCaseVal = MaxCaseVal;
395 for (auto CI : SI.cases()) {
396 const APInt &CaseVal = CI.getCaseValue()->getValue();
397 if (CaseVal.sgt(MaxCaseVal))
398 MaxCaseVal = CaseVal;
399 if (CaseVal.slt(MinCaseVal))
400 MinCaseVal = CaseVal;
401 }
402
403 // Check if suitable for a bit test
404 if (N <= DL.getIndexSizeInBits(0u)) {
405 SmallPtrSet<const BasicBlock *, 4> Dests;
406 for (auto I : SI.cases())
407 Dests.insert(I.getCaseSuccessor());
408
409 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
410 DL))
411 return 1;
412 }
413
414 // Check if suitable for a jump table.
415 if (IsJTAllowed) {
416 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
417 return N;
418 uint64_t Range =
419 (MaxCaseVal - MinCaseVal)
420 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
421 // Check whether a range of clusters is dense enough for a jump table
422 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
423 JumpTableSize = Range;
424 return 1;
425 }
426 }
427 return N;
428 }
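The jump-table decision above hinges on the spread of the case values. A minimal sketch of the Range computation with plain 64-bit values; the saturation done via APInt::getLimitedValue in the real code is omitted, and the example cases are assumed:

  constexpr unsigned long long caseRangeSketch(unsigned long long MinCaseVal,
                                               unsigned long long MaxCaseVal) {
    // (MaxCaseVal - MinCaseVal) + 1, i.e. the number of table slots needed.
    return MaxCaseVal - MinCaseVal + 1;
  }
  // Cases {10, 12, 17}: Range = 8, so 3 cases would have to be dense enough
  // over 8 slots for JumpTableSize to be set to 8.
  static_assert(caseRangeSketch(10, 17) == 8, "17 - 10 + 1");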
429
430 bool shouldBuildLookupTables() {
431 const TargetLoweringBase *TLI = getTLI();
432 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
433 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
434 }
435
436 bool shouldBuildRelLookupTables() const {
437 const TargetMachine &TM = getTLI()->getTargetMachine();
438 // If non-PIC mode, do not generate a relative lookup table.
439 if (!TM.isPositionIndependent())
440 return false;
441
442 /// Relative lookup table entries consist of 32-bit offsets.
443 /// Do not generate relative lookup tables for large code models
444 /// in 64-bit architectures where 32-bit offsets might not be enough.
445 if (TM.getCodeModel() == CodeModel::Medium ||
446 TM.getCodeModel() == CodeModel::Large)
447 return false;
448
449 Triple TargetTriple = TM.getTargetTriple();
450 if (!TargetTriple.isArch64Bit())
451 return false;
452
453 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
454 // there.
455 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
456 return false;
457
458 return true;
459 }
460
461 bool haveFastSqrt(Type *Ty) {
462 const TargetLoweringBase *TLI = getTLI();
463 EVT VT = TLI->getValueType(DL, Ty);
464 return TLI->isTypeLegal(VT) &&
465 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
466 }
467
468 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
469 return true;
470 }
471
472 InstructionCost getFPOpCost(Type *Ty) {
473 // Check whether FADD is available, as a proxy for floating-point in
474 // general.
475 const TargetLoweringBase *TLI = getTLI();
476 EVT VT = TLI->getValueType(DL, Ty);
477 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
478 return TargetTransformInfo::TCC_Basic;
479 return TargetTransformInfo::TCC_Expensive;
480 }
481
482 unsigned getInliningThresholdMultiplier() { return 1; }
483 unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
484
485 int getInlinerVectorBonusPercent() { return 150; }
486
487 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
488 TTI::UnrollingPreferences &UP,
489 OptimizationRemarkEmitter *ORE) {
490 // This unrolling functionality is target independent, but to provide some
491 // motivation for its intended use, for x86:
492
493 // According to the Intel 64 and IA-32 Architectures Optimization Reference
494 // Manual, Intel Core models and later have a loop stream detector (and
495 // associated uop queue) that can benefit from partial unrolling.
496 // The relevant requirements are:
497 // - The loop must have no more than 4 (8 for Nehalem and later) branches
498 // taken, and none of them may be calls.
499 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
500
501 // According to the Software Optimization Guide for AMD Family 15h
502 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
503 // and loop buffer which can benefit from partial unrolling.
504 // The relevant requirements are:
505 // - The loop must have fewer than 16 branches
506 // - The loop must have less than 40 uops in all executed loop branches
507
508 // The number of taken branches in a loop is hard to estimate here, and
509 // benchmarking has revealed that it is better not to be conservative when
510 // estimating the branch count. As a result, we'll ignore the branch limits
511 // until someone finds a case where it matters in practice.
512
513 unsigned MaxOps;
514 const TargetSubtargetInfo *ST = getST();
515 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
516 MaxOps = PartialUnrollingThreshold;
517 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
518 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
519 else
520 return;
521
522 // Scan the loop: don't unroll loops with calls.
523 for (BasicBlock *BB : L->blocks()) {
524 for (Instruction &I : *BB) {
525 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
526 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
527 if (!thisT()->isLoweredToCall(F))
528 continue;
529 }
530
531 if (ORE) {
532 ORE->emit([&]() {
533 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
534 L->getHeader())
535 << "advising against unrolling the loop because it "
536 "contains a "
537 << ore::NV("Call", &I);
538 });
539 }
540 return;
541 }
542 }
543 }
544
545 // Enable runtime and partial unrolling up to the specified size.
546 // Enable using trip count upper bound to unroll loops.
547 UP.Partial = UP.Runtime = UP.UpperBound = true;
548 UP.PartialThreshold = MaxOps;
549
550 // Avoid unrolling when optimizing for size.
551 UP.OptSizeThreshold = 0;
552 UP.PartialOptSizeThreshold = 0;
553
554 // Set number of instructions optimized when "back edge"
555 // becomes "fall through" to default value of 2.
556 UP.BEInsns = 2;
557 }
558
559 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
560 TTI::PeelingPreferences &PP) {
561 PP.PeelCount = 0;
562 PP.AllowPeeling = true;
563 PP.AllowLoopNestsPeeling = false;
564 PP.PeelProfiledIterations = true;
565 }
566
567 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
568 AssumptionCache &AC,
569 TargetLibraryInfo *LibInfo,
570 HardwareLoopInfo &HWLoopInfo) {
571 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
572 }
573
574 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
575 AssumptionCache &AC, TargetLibraryInfo *TLI,
576 DominatorTree *DT,
577 const LoopAccessInfo *LAI) {
578 return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
579 }
580
581 bool emitGetActiveLaneMask() {
582 return BaseT::emitGetActiveLaneMask();
583 }
584
585 Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
586 IntrinsicInst &II) {
587 return BaseT::instCombineIntrinsic(IC, II);
588 }
589
590 Optional<Value *> simplifyDemandedUseBitsIntrinsic(InstCombiner &IC,
591 IntrinsicInst &II,
592 APInt DemandedMask,
593 KnownBits &Known,
594 bool &KnownBitsComputed) {
595 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
596 KnownBitsComputed);
597 }
598
599 Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
600 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
601 APInt &UndefElts2, APInt &UndefElts3,
602 std::function<void(Instruction *, unsigned, APInt, APInt &)>
603 SimplifyAndSetOp) {
604 return BaseT::simplifyDemandedVectorEltsIntrinsic(
605 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
606 SimplifyAndSetOp);
607 }
608
609 InstructionCost getInstructionLatency(const Instruction *I) {
610 if (isa<LoadInst>(I))
611 return getST()->getSchedModel().DefaultLoadLatency;
612
613 return BaseT::getInstructionLatency(I);
614 }
615
616 virtual Optional<unsigned>
617 getCacheSize(TargetTransformInfo::CacheLevel Level) const {
618 return Optional<unsigned>(
619 getST()->getCacheSize(static_cast<unsigned>(Level)));
620 }
621
622 virtual Optional<unsigned>
623 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
624 Optional<unsigned> TargetResult =
625 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
626
627 if (TargetResult)
628 return TargetResult;
629
630 return BaseT::getCacheAssociativity(Level);
631 }
632
633 virtual unsigned getCacheLineSize() const {
634 return getST()->getCacheLineSize();
635 }
636
637 virtual unsigned getPrefetchDistance() const {
638 return getST()->getPrefetchDistance();
639 }
640
641 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
642 unsigned NumStridedMemAccesses,
643 unsigned NumPrefetches,
644 bool HasCall) const {
645 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
646 NumPrefetches, HasCall);
647 }
648
649 virtual unsigned getMaxPrefetchIterationsAhead() const {
650 return getST()->getMaxPrefetchIterationsAhead();
651 }
652
653 virtual bool enableWritePrefetching() const {
654 return getST()->enableWritePrefetching();
655 }
656
657 /// @}
658
659 /// \name Vector TTI Implementations
660 /// @{
661
662 TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
663 return TypeSize::getFixed(32);
664 }
665
666 Optional<unsigned> getMaxVScale() const { return None; }
667
668 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
669 /// are set if the demanded result elements need to be inserted and/or
670 /// extracted from vectors.
671 InstructionCost getScalarizationOverhead(VectorType *InTy,
672 const APInt &DemandedElts,
673 bool Insert, bool Extract) {
674 /// FIXME: a bitfield is not a reasonable abstraction for talking about
675 /// which elements are needed from a scalable vector
676 auto *Ty = cast<FixedVectorType>(InTy);
677
678 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
679 "Vector size mismatch");
680
681 InstructionCost Cost = 0;
682
683 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
684 if (!DemandedElts[i])
685 continue;
686 if (Insert)
687 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, i);
688 if (Extract)
689 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
690 }
691
692 return Cost;
693 }
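A standalone sketch of the demanded-elements walk above, with a plain bitmask standing in for APInt and an assumed unit cost per insert or extract:

  constexpr unsigned scalarizationOverheadSketch(unsigned NumElts,
                                                 unsigned DemandedMask,
                                                 bool Insert, bool Extract) {
    unsigned Cost = 0;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (!(DemandedMask & (1u << i)))
        continue;                        // lane not demanded: free
      if (Insert)  Cost += 1;            // insertelement for this lane
      if (Extract) Cost += 1;            // extractelement for this lane
    }
    return Cost;
  }
  // 8 lanes, only lanes 0..3 demanded, insert only: 4 inserts.
  static_assert(scalarizationOverheadSketch(8, 0x0F, true, false) == 4,
                "one insert per demanded lane");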
694
695 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
696 InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
697 bool Extract) {
698 auto *Ty = cast<FixedVectorType>(InTy);
699
700 APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements());
701 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
702 }
703
704 /// Estimate the overhead of scalarizing an instruction's unique
705 /// non-constant operands. The (potentially vector) types to use for each
706 /// argument are passed via Tys.
707 InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
708 ArrayRef<Type *> Tys) {
709 assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
710
711 InstructionCost Cost = 0;
712 SmallPtrSet<const Value*, 4> UniqueOperands;
713 for (int I = 0, E = Args.size(); I != E; I++) {
714 // Disregard things like metadata arguments.
715 const Value *A = Args[I];
716 Type *Ty = Tys[I];
717 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
718 !Ty->isPtrOrPtrVectorTy())
719 continue;
720
721 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
722 if (auto *VecTy = dyn_cast<VectorType>(Ty))
723 Cost += getScalarizationOverhead(VecTy, false, true);
724 }
725 }
726
727 return Cost;
728 }
729
730 /// Estimate the overhead of scalarizing the inputs and outputs of an
731 /// instruction, with return type RetTy and arguments Args of type Tys. If
732 /// Args are unknown (empty), then the cost associated with one argument is
733 /// added as a heuristic.
734 InstructionCost getScalarizationOverhead(VectorType *RetTy,
735 ArrayRef<const Value *> Args,
736 ArrayRef<Type *> Tys) {
737 InstructionCost Cost = getScalarizationOverhead(RetTy, true, false);
738 if (!Args.empty())
739 Cost += getOperandsScalarizationOverhead(Args, Tys);
740 else
741 // When no information on arguments is provided, we add the cost
742 // associated with one argument as a heuristic.
743 Cost += getScalarizationOverhead(RetTy, false, true);
744
745 return Cost;
746 }
747
748 unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
749
750 InstructionCost getArithmeticInstrCost(
751 unsigned Opcode, Type *Ty,
752 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
753 TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
754 TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
755 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
756 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
757 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
758 const Instruction *CxtI = nullptr) {
759 // Check if any of the operands are vector operands.
760 const TargetLoweringBase *TLI = getTLI();
761 int ISD = TLI->InstructionOpcodeToISD(Opcode);
762 assert(ISD && "Invalid opcode");
763
764 // TODO: Handle more cost kinds.
765 if (CostKind != TTI::TCK_RecipThroughput)
766 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
767 Opd1Info, Opd2Info,
768 Opd1PropInfo, Opd2PropInfo,
769 Args, CxtI);
770
771 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
772
773 bool IsFloat = Ty->isFPOrFPVectorTy();
774 // Assume that floating point arithmetic operations cost twice as much as
775 // integer operations.
776 InstructionCost OpCost = (IsFloat ? 2 : 1);
777
778 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
779 // The operation is legal. Assume it costs 1.
780 // TODO: Once we have extract/insert subvector cost we need to use them.
781 return LT.first * OpCost;
782 }
783
784 if (!TLI->isOperationExpand(ISD, LT.second)) {
785 // If the operation is custom lowered, then assume that the code is twice
786 // as expensive.
787 return LT.first * 2 * OpCost;
788 }
789
790 // An 'Expand' of URem and SRem is special because it may default
791 // to expanding the operation into a sequence of sub-operations
792 // i.e. X % Y -> X-(X/Y)*Y.
793 if (ISD == ISD::UREM || ISD == ISD::SREM) {
794 bool IsSigned = ISD == ISD::SREM;
795 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
796 LT.second) ||
797 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
798 LT.second)) {
799 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
800 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
801 DivOpc, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo,
802 Opd2PropInfo);
803 InstructionCost MulCost =
804 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
805 InstructionCost SubCost =
806 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
807 return DivCost + MulCost + SubCost;
808 }
809 }
810
811 // We cannot scalarize scalable vectors, so return Invalid.
812 if (isa<ScalableVectorType>(Ty))
813 return InstructionCost::getInvalid();
814
815 // Else, assume that we need to scalarize this op.
816 // TODO: If one of the types get legalized by splitting, handle this
817 // similarly to what getCastInstrCost() does.
818 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
819 InstructionCost Cost = thisT()->getArithmeticInstrCost(
820 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
821 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
822 // Return the cost of multiple scalar invocations plus the cost of
823 // inserting and extracting the values.
824 SmallVector<Type *> Tys(Args.size(), Ty);
825 return getScalarizationOverhead(VTy, Args, Tys) +
826 VTy->getNumElements() * Cost;
827 }
828
829 // We don't know anything about this scalar instruction.
830 return OpCost;
831 }
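One detail worth making concrete is the UREM/SREM branch above: when the remainder is expanded as X - (X / Y) * Y, the returned estimate is simply the sum of the three sub-operations. A sketch with assumed per-operation costs:

  constexpr unsigned remExpansionSketch(unsigned DivCost, unsigned MulCost,
                                        unsigned SubCost) {
    return DivCost + MulCost + SubCost;  // X % Y -> X - (X / Y) * Y
  }
  static_assert(remExpansionSketch(20, 3, 1) == 24, "div + mul + sub");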
832
833 TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
834 ArrayRef<int> Mask) const {
835 int Limit = Mask.size() * 2;
836 if (Mask.empty() ||
837 // Extra check required by isSingleSourceMaskImpl function (called by
838 // ShuffleVectorInst::isSingleSourceMask).
839 any_of(Mask, [Limit](int I) { return I >= Limit; }))
840 return Kind;
841 switch (Kind) {
842 case TTI::SK_PermuteSingleSrc:
843 if (ShuffleVectorInst::isReverseMask(Mask))
844 return TTI::SK_Reverse;
845 if (ShuffleVectorInst::isZeroEltSplatMask(Mask))
846 return TTI::SK_Broadcast;
847 break;
848 case TTI::SK_PermuteTwoSrc:
849 if (ShuffleVectorInst::isSelectMask(Mask))
850 return TTI::SK_Select;
851 if (ShuffleVectorInst::isTransposeMask(Mask))
852 return TTI::SK_Transpose;
853 break;
854 case TTI::SK_Select:
855 case TTI::SK_Reverse:
856 case TTI::SK_Broadcast:
857 case TTI::SK_Transpose:
858 case TTI::SK_InsertSubvector:
859 case TTI::SK_ExtractSubvector:
860 case TTI::SK_Splice:
861 break;
862 }
863 return Kind;
864 }
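To make the refinement above a little more tangible, here is a standalone sketch of just the "reverse" pattern it can recognize for a single-source permute; plain arrays stand in for the IR shuffle mask, and this deliberately ignores the undef (-1) lanes that the real ShuffleVectorInst helpers accept:

  // Lane i reads element (N - 1 - i): the mask is a pure reversal.
  template <unsigned N>
  constexpr bool isReverseMaskSketch(const int (&Mask)[N]) {
    for (unsigned i = 0; i < N; ++i)
      if (Mask[i] != static_cast<int>(N - 1 - i))
        return false;
    return true;
  }
  constexpr int Rev[4] = {3, 2, 1, 0};   // would be refined to SK_Reverse
  constexpr int Splat[4] = {0, 0, 0, 0}; // would be refined to SK_Broadcast
  static_assert(isReverseMaskSketch(Rev), "descending lane indices");
  static_assert(!isReverseMaskSketch(Splat), "a splat is not a reverse");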
865
866 InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
867 ArrayRef<int> Mask, int Index,
868 VectorType *SubTp) {
869
870 switch (improveShuffleKindFromMask(Kind, Mask)) {
871 case TTI::SK_Broadcast:
872 return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp));
873 case TTI::SK_Select:
874 case TTI::SK_Splice:
875 case TTI::SK_Reverse:
876 case TTI::SK_Transpose:
877 case TTI::SK_PermuteSingleSrc:
878 case TTI::SK_PermuteTwoSrc:
879 return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp));
880 case TTI::SK_ExtractSubvector:
881 return getExtractSubvectorOverhead(Tp, Index,
882 cast<FixedVectorType>(SubTp));
883 case TTI::SK_InsertSubvector:
884 return getInsertSubvectorOverhead(Tp, Index,
885 cast<FixedVectorType>(SubTp));
886 }
887 llvm_unreachable("Unknown TTI::ShuffleKind");
888 }
889
890 InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
891 TTI::CastContextHint CCH,
892 TTI::TargetCostKind CostKind,
893 const Instruction *I = nullptr) {
894 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
895 return 0;
896
897 const TargetLoweringBase *TLI = getTLI();
898 int ISD = TLI->InstructionOpcodeToISD(Opcode);
899 assert(ISD && "Invalid opcode");
900 std::pair<InstructionCost, MVT> SrcLT =
901 TLI->getTypeLegalizationCost(DL, Src);
902 std::pair<InstructionCost, MVT> DstLT =
903 TLI->getTypeLegalizationCost(DL, Dst);
904
905 TypeSize SrcSize = SrcLT.second.getSizeInBits();
906 TypeSize DstSize = DstLT.second.getSizeInBits();
907 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
908 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
909
910 switch (Opcode) {
911 default:
912 break;
913 case Instruction::Trunc:
914 // Check for NOOP conversions.
915 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
916 return 0;
917 LLVM_FALLTHROUGH;
918 case Instruction::BitCast:
919 // Bitcasts between types that are legalized to the same type are free, and
920 // we assume int to/from ptr of the same size is also free.
921 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
922 SrcSize == DstSize)
923 return 0;
924 break;
925 case Instruction::FPExt:
926 if (I && getTLI()->isExtFree(I))
927 return 0;
928 break;
929 case Instruction::ZExt:
930 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
931 return 0;
932 LLVM_FALLTHROUGH;
933 case Instruction::SExt:
934 if (I && getTLI()->isExtFree(I))
935 return 0;
936
937 // If this is a zext/sext of a load, return 0 if the corresponding
938 // extending load exists on target and the result type is legal.
939 if (CCH == TTI::CastContextHint::Normal) {
940 EVT ExtVT = EVT::getEVT(Dst);
941 EVT LoadVT = EVT::getEVT(Src);
942 unsigned LType =
943 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
944 if (DstLT.first == SrcLT.first &&
945 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
946 return 0;
947 }
948 break;
949 case Instruction::AddrSpaceCast:
950 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
951 Dst->getPointerAddressSpace()))
952 return 0;
953 break;
954 }
955
956 auto *SrcVTy = dyn_cast<VectorType>(Src);
957 auto *DstVTy = dyn_cast<VectorType>(Dst);
958
959 // If the cast is marked as legal (or promote) then assume low cost.
960 if (SrcLT.first == DstLT.first &&
961 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
962 return SrcLT.first;
963
964 // Handle scalar conversions.
965 if (!SrcVTy && !DstVTy) {
966 // Just check the op cost. If the operation is legal then assume it costs
967 // 1.
968 if (!TLI->isOperationExpand(ISD, DstLT.second))
969 return 1;
970
971 // Assume that illegal scalar instructions are expensive.
972 return 4;
973 }
974
975 // Check vector-to-vector casts.
976 if (DstVTy && SrcVTy) {
977 // If the cast is between same-sized registers, then the check is simple.
978 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
979
980 // Assume that Zext is done using AND.
981 if (Opcode == Instruction::ZExt)
982 return SrcLT.first;
983
984 // Assume that sext is done using SHL and SRA.
985 if (Opcode == Instruction::SExt)
986 return SrcLT.first * 2;
987
988 // Just check the op cost. If the operation is legal then assume it
989 // costs 1 and multiply by the type-legalization overhead.
990
991 if (!TLI->isOperationExpand(ISD, DstLT.second))
992 return SrcLT.first * 1;
993 }
994
995 // If we are legalizing by splitting, query the concrete TTI for the cost
996 // of casting the original vector twice. We also need to factor in the
997 // cost of the split itself. Count that as 1, to be consistent with
998 // TLI->getTypeLegalizationCost().
999 bool SplitSrc =
1000 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1001 TargetLowering::TypeSplitVector;
1002 bool SplitDst =
1003 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1004 TargetLowering::TypeSplitVector;
1005 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1006 DstVTy->getElementCount().isVector()) {
1007 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1008 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1009 T *TTI = static_cast<T *>(this);
1010 // If both types need to be split then the split is free.
1011 InstructionCost SplitCost =
1012 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1013 return SplitCost +
1014 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1015 CostKind, I));
1016 }
1017
1018 // Scalarization cost is Invalid, can't assume any num elements.
1019 if (isa<ScalableVectorType>(DstVTy))
1020 return InstructionCost::getInvalid();
1021
1022 // In other cases where the source or destination are illegal, assume
1023 // the operation will get scalarized.
1024 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1025 InstructionCost Cost = thisT()->getCastInstrCost(
1026 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1027
1028 // Return the cost of multiple scalar invocation plus the cost of
1029 // inserting and extracting the values.
1030 return getScalarizationOverhead(DstVTy, true, true) + Num * Cost;
1031 }
1032
1033 // We already handled vector-to-vector and scalar-to-scalar conversions.
1034 // This is where we handle bitcasts between vectors and scalars. We need to
1035 // assume that the conversion is scalarized in one way or another.
1036
1037 if (Opcode == Instruction::BitCast) {
1038 // Illegal bitcasts are done by storing and loading from a stack slot.
1039 return (SrcVTy ? getScalarizationOverhead(SrcVTy, false, true) : 0) +
1040 (DstVTy ? getScalarizationOverhead(DstVTy, true, false) : 0);
1041 }
1042
1043 llvm_unreachable("Unhandled cast")::llvm::llvm_unreachable_internal("Unhandled cast", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/BasicTTIImpl.h"
, 1043)
;
1044 }
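As a rough illustration of the split handling above, the following standalone helper (hypothetical inputs, not the TTI interface) shows how one level of splitting is priced: the split itself is free only when both operands have to be split, and the half-width cast is paid twice.

// HalfWidthCastCost stands in for whatever the recursive getCastInstrCost call
// returns for the half-sized vectors; SplitSrc/SplitDst say whether the source
// and destination types are split by type legalization.
long splitCastCostSketch(long HalfWidthCastCost, bool SplitSrc, bool SplitDst) {
  // getVectorSplitCost() defaults to 1 later in this header.
  long SplitCost = (SplitSrc && SplitDst) ? 0 : 1;
  return SplitCost + 2 * HalfWidthCastCost; // cast each half independently
}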
1045
1046 InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1047 VectorType *VecTy, unsigned Index) {
1048 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1049 Index) +
1050 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1051 TTI::CastContextHint::None,
1052 TTI::TCK_RecipThroughput);
1053 }
1054
1055 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
1056 const Instruction *I = nullptr) {
1057 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1058 }
1059
1060 InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1061 CmpInst::Predicate VecPred,
1062 TTI::TargetCostKind CostKind,
1063 const Instruction *I = nullptr) {
1064 const TargetLoweringBase *TLI = getTLI();
1065 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1066 assert(ISD && "Invalid opcode");
1067
1068 // TODO: Handle other cost kinds.
1069 if (CostKind != TTI::TCK_RecipThroughput)
1070 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1071 I);
1072
1073 // Selects on vectors are actually vector selects.
1074 if (ISD == ISD::SELECT) {
1075 assert(CondTy && "CondTy must exist");
1076 if (CondTy->isVectorTy())
1077 ISD = ISD::VSELECT;
1078 }
1079 std::pair<InstructionCost, MVT> LT =
1080 TLI->getTypeLegalizationCost(DL, ValTy);
1081
1082 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1083 !TLI->isOperationExpand(ISD, LT.second)) {
1084 // The operation is legal. Assume it costs 1. Multiply
1085 // by the type-legalization overhead.
1086 return LT.first * 1;
1087 }
1088
1089 // Otherwise, assume that the cast is scalarized.
1090 // TODO: If one of the types get legalized by splitting, handle this
1091 // similarly to what getCastInstrCost() does.
1092 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1093 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1094 if (CondTy)
1095 CondTy = CondTy->getScalarType();
1096 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1097 Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
1098
1099 // Return the cost of multiple scalar invocation plus the cost of
1100 // inserting and extracting the values.
1101 return getScalarizationOverhead(ValVTy, true, false) + Num * Cost;
1102 }
1103
1104 // Unknown scalar opcode.
1105 return 1;
1106 }
1107
1108 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1109 unsigned Index) {
1110 std::pair<InstructionCost, MVT> LT =
1111 getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
1112
1113 return LT.first;
1114 }
1115
1116 InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
1117 MaybeAlign Alignment, unsigned AddressSpace,
1118 TTI::TargetCostKind CostKind,
1119 const Instruction *I = nullptr) {
1120 assert(!Src->isVoidTy() && "Invalid type");
1121 // Assume types, such as structs, are expensive.
1122 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1123 return 4;
1124 std::pair<InstructionCost, MVT> LT =
1125 getTLI()->getTypeLegalizationCost(DL, Src);
1126
1127 // Assuming that all loads of legal types cost 1.
1128 InstructionCost Cost = LT.first;
1129 if (CostKind != TTI::TCK_RecipThroughput)
1130 return Cost;
1131
1132 if (Src->isVectorTy() &&
1133 // In practice it's not currently possible to have a change in lane
1134 // length for extending loads or truncating stores so both types should
1135 // have the same scalable property.
1136 TypeSize::isKnownLT(Src->getPrimitiveSizeInBits(),
1137 LT.second.getSizeInBits())) {
1138 // This is a vector load that legalizes to a larger type than the vector
1139 // itself. Unless the corresponding extending load or truncating store is
1140 // legal, then this will scalarize.
1141 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
1142 EVT MemVT = getTLI()->getValueType(DL, Src);
1143 if (Opcode == Instruction::Store)
1144 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1145 else
1146 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1147
1148 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1149 // This is a vector load/store for some illegal type that is scalarized.
1150 // We must account for the cost of building or decomposing the vector.
1151 Cost += getScalarizationOverhead(cast<VectorType>(Src),
1152 Opcode != Instruction::Store,
1153 Opcode == Instruction::Store);
1154 }
1155 }
1156
1157 return Cost;
1158 }
1159
1160 InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
1161 Align Alignment, unsigned AddressSpace,
1162 TTI::TargetCostKind CostKind) {
1163 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1164 CostKind);
1165 }
1166
1167 InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1168 const Value *Ptr, bool VariableMask,
1169 Align Alignment,
1170 TTI::TargetCostKind CostKind,
1171 const Instruction *I = nullptr) {
1172 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1173 true, CostKind);
1174 }
1175
1176 InstructionCost getInterleavedMemoryOpCost(
1177 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1178 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1179 bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1180 auto *VT = cast<FixedVectorType>(VecTy);
10
'VecTy' is a 'FixedVectorType'
1181
1182 unsigned NumElts = VT->getNumElements();
1183 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
11
Assuming 'Factor' is > 1
12
Assuming the condition is true
13
'?' condition is true
1184
1185 unsigned NumSubElts = NumElts / Factor;
1186 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1187
1188 // Firstly, the cost of load/store operation.
1189 InstructionCost Cost;
1190 if (UseMaskForCond || UseMaskForGaps)
13.1
'UseMaskForCond' is false
13.2
'UseMaskForGaps' is false
14
Taking false branch
1191 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1192 AddressSpace, CostKind);
1193 else
1194 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1195 CostKind);
1196
1197 // Legalize the vector type, and get the legalized and unlegalized type
1198 // sizes.
1199 MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
1200 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1201 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1202
1203 // Scale the cost of the memory operation by the fraction of legalized
1204 // instructions that will actually be used. We shouldn't account for the
1205 // cost of dead instructions since they will be removed.
1206 //
1207 // E.g., An interleaved load of factor 8:
1208 // %vec = load <16 x i64>, <16 x i64>* %ptr
1209 // %v0 = shufflevector %vec, undef, <0, 8>
1210 //
1211 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1212 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1213 // type). The other loads are unused.
1214 //
1215 // TODO: Note that legalization can turn masked loads/stores into unmasked
1216 // (legalized) loads/stores. This can be reflected in the cost.
1217 if (VecTySize > VecTyLTSize) {
15
Assuming 'VecTySize' is <= 'VecTyLTSize'
1218 // The number of loads of a legal type it will take to represent a load
1219 // of the unlegalized vector type.
1220 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1221
1222 // The number of elements of the unlegalized type that correspond to a
1223 // single legal instruction.
1224 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1225
1226 // Determine which legal instructions will be used.
1227 BitVector UsedInsts(NumLegalInsts, false);
1228 for (unsigned Index : Indices)
1229 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1230 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1231
1232 // Scale the cost of the load by the fraction of legal instructions that
1233 // will be used.
1234 Cost *= UsedInsts.count() / NumLegalInsts;
1235 }
1236
1237 // Then plus the cost of interleave operation.
1238 assert(Indices.size() <= Factor &&
1239        "Interleaved memory op has too many members");
16
Taking false branch
17
Assuming the condition is true
18
'?' condition is true
1240 if (Opcode == Instruction::Load) {
19
Assuming 'Opcode' is not equal to Load
20
Taking false branch
1241 // The interleave cost is similar to extracting the sub vectors' elements
1242 // from the wide vector and inserting them into the sub vectors.
1243 //
1244 // E.g. An interleaved load of factor 2 (with one member of index 0):
1245 // %vec = load <8 x i32>, <8 x i32>* %ptr
1246 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1247 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1248 // <8 x i32> vector and insert them into a <4 x i32> vector.
1249 for (unsigned Index : Indices) {
1250 assert(Index < Factor && "Invalid index for interleaved memory op");
1251
1252 // Extract elements from loaded vector for each sub vector.
1253 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1254 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VT,
1255 Index + Elm * Factor);
1256 }
1257
1258 InstructionCost InsSubCost = 0;
1259 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1260 InsSubCost +=
1261 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVT, Elm);
1262
1263 Cost += Indices.size() * InsSubCost;
1264 } else {
1265 // The interleave cost is to extract elements from the sub vectors and
1266 // insert them into the wide vector.
1267 //
1268 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1269 // (using VF=4):
1270 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1271 // %gaps.mask = <true, true, false, true, true, false,
1272 // true, true, false, true, true, false>
1273 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1274 // i32 Align, <12 x i1> %gaps.mask
1275 // The cost is estimated as extract all elements (of actual members,
1276 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1277 // i32> vector.
1278 InstructionCost ExtSubCost = 0;
1279 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
21
Assuming 'Elm' is < 'NumSubElts'
22
Loop condition is true. Entering loop body
1280 ExtSubCost += thisT()->getVectorInstrCost(Instruction::ExtractElement,
23
Calling 'X86TTIImpl::getVectorInstrCost'
1281 SubVT, Elm);
1282 Cost += ExtSubCost * Indices.size();
1283
1284 for (unsigned Index : Indices) {
1285 assert(Index < Factor && "Invalid index for interleaved memory op");
1286
1287 // Insert elements from loaded vector for each sub vector.
1288 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1289 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VT,
1290 Index + Elm * Factor);
1291 }
1292 }
1293
1294 if (!UseMaskForCond)
1295 return Cost;
1296
1297 Type *I8Type = Type::getInt8Ty(VT->getContext());
1298 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1299 SubVT = FixedVectorType::get(I8Type, NumSubElts);
1300
1301 // The Mask shuffling cost is to extract all the elements of the Mask
1302 // and insert each of them Factor times into the wide vector:
1303 //
1304 // E.g. an interleaved group with factor 3:
1305 // %mask = icmp ult <8 x i32> %vec1, %vec2
1306 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1307 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1308 // The cost is estimated as extract all mask elements from the <8xi1> mask
1309 // vector and insert them factor times into the <24xi1> shuffled mask
1310 // vector.
1311 for (unsigned i = 0; i < NumSubElts; i++)
1312 Cost +=
1313 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i);
1314
1315 for (unsigned i = 0; i < NumElts; i++)
1316 Cost +=
1317 thisT()->getVectorInstrCost(Instruction::InsertElement, MaskVT, i);
1318
1319 // The Gaps mask is invariant and created outside the loop, therefore the
1320 // cost of creating it is not accounted for here. However if we have both
1321 // a MaskForGaps and some other mask that guards the execution of the
1322 // memory access, we need to account for the cost of And-ing the two masks
1323 // inside the loop.
1324 if (UseMaskForGaps)
1325 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1326 CostKind);
1327
1328 return Cost;
1329 }
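The used-instruction scaling in the comment above can be reproduced with a short standalone sketch (illustrative numbers only; the in-tree code does the same bookkeeping with a BitVector and scales Cost by the resulting ratio).

#include <vector>

// Fraction of the legal loads/stores that an interleaved access actually uses.
// Returned as a double here purely for readability.
double usedInstFraction(unsigned NumElts, unsigned Factor,
                        unsigned NumLegalInsts,
                        const std::vector<unsigned> &Indices) {
  unsigned NumSubElts = NumElts / Factor;
  unsigned EltsPerLegalInst = (NumElts + NumLegalInsts - 1) / NumLegalInsts;
  std::vector<bool> Used(NumLegalInsts, false);
  for (unsigned Index : Indices)
    for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
      Used[(Index + Elt * Factor) / EltsPerLegalInst] = true;
  unsigned Count = 0;
  for (bool B : Used)
    Count += B;
  return double(Count) / NumLegalInsts;
}

// The factor-8 example from the comment: a <16 x i64> load legalized to eight
// v2i64 loads with a single member at index 0 touches only legal loads 0 and 4,
// so usedInstFraction(16, 8, 8, {0}) == 2.0 / 8.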
1330
1331 /// Get intrinsic cost based on arguments.
1332 InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1333 TTI::TargetCostKind CostKind) {
1334 // Check for generically free intrinsics.
1335 if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
1336 return 0;
1337
1338 // Assume that target intrinsics are cheap.
1339 Intrinsic::ID IID = ICA.getID();
1340 if (Function::isTargetIntrinsic(IID))
1341 return TargetTransformInfo::TCC_Basic;
1342
1343 if (ICA.isTypeBasedOnly())
1344 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
1345
1346 Type *RetTy = ICA.getReturnType();
1347
1348 ElementCount RetVF =
1349 (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
1350 : ElementCount::getFixed(1));
1351 const IntrinsicInst *I = ICA.getInst();
1352 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1353 FastMathFlags FMF = ICA.getFlags();
1354 switch (IID) {
1355 default:
1356 break;
1357
1358 case Intrinsic::cttz:
1359 // FIXME: If necessary, this should go in target-specific overrides.
1360 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz())
1361 return TargetTransformInfo::TCC_Basic;
1362 break;
1363
1364 case Intrinsic::ctlz:
1365 // FIXME: If necessary, this should go in target-specific overrides.
1366 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz())
1367 return TargetTransformInfo::TCC_Basic;
1368 break;
1369
1370 case Intrinsic::memcpy:
1371 return thisT()->getMemcpyCost(ICA.getInst());
1372
1373 case Intrinsic::masked_scatter: {
1374 const Value *Mask = Args[3];
1375 bool VarMask = !isa<Constant>(Mask);
1376 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
1377 return thisT()->getGatherScatterOpCost(Instruction::Store,
1378 ICA.getArgTypes()[0], Args[1],
1379 VarMask, Alignment, CostKind, I);
1380 }
1381 case Intrinsic::masked_gather: {
1382 const Value *Mask = Args[2];
1383 bool VarMask = !isa<Constant>(Mask);
1384 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
1385 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1386 VarMask, Alignment, CostKind, I);
1387 }
1388 case Intrinsic::experimental_stepvector: {
1389 if (isa<ScalableVectorType>(RetTy))
1390 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1391 // The cost of materialising a constant integer vector.
1392 return TargetTransformInfo::TCC_Basic;
1393 }
1394 case Intrinsic::experimental_vector_extract: {
1395 // FIXME: Handle case where a scalable vector is extracted from a scalable
1396 // vector
1397 if (isa<ScalableVectorType>(RetTy))
1398 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1399 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1400 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
1401 cast<VectorType>(Args[0]->getType()), None,
1402 Index, cast<VectorType>(RetTy));
1403 }
1404 case Intrinsic::experimental_vector_insert: {
1405 // FIXME: Handle case where a scalable vector is inserted into a scalable
1406 // vector
1407 if (isa<ScalableVectorType>(Args[1]->getType()))
1408 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1409 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1410 return thisT()->getShuffleCost(
1411 TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()), None,
1412 Index, cast<VectorType>(Args[1]->getType()));
1413 }
1414 case Intrinsic::experimental_vector_reverse: {
1415 return thisT()->getShuffleCost(TTI::SK_Reverse,
1416 cast<VectorType>(Args[0]->getType()), None,
1417 0, cast<VectorType>(RetTy));
1418 }
1419 case Intrinsic::experimental_vector_splice: {
1420 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1421 return thisT()->getShuffleCost(TTI::SK_Splice,
1422 cast<VectorType>(Args[0]->getType()), None,
1423 Index, cast<VectorType>(RetTy));
1424 }
1425 case Intrinsic::vector_reduce_add:
1426 case Intrinsic::vector_reduce_mul:
1427 case Intrinsic::vector_reduce_and:
1428 case Intrinsic::vector_reduce_or:
1429 case Intrinsic::vector_reduce_xor:
1430 case Intrinsic::vector_reduce_smax:
1431 case Intrinsic::vector_reduce_smin:
1432 case Intrinsic::vector_reduce_fmax:
1433 case Intrinsic::vector_reduce_fmin:
1434 case Intrinsic::vector_reduce_umax:
1435 case Intrinsic::vector_reduce_umin: {
1436 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
1437 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1438 }
1439 case Intrinsic::vector_reduce_fadd:
1440 case Intrinsic::vector_reduce_fmul: {
1441 IntrinsicCostAttributes Attrs(
1442 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
1443 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1444 }
1445 case Intrinsic::fshl:
1446 case Intrinsic::fshr: {
1447 if (isa<ScalableVectorType>(RetTy))
1448 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1449 const Value *X = Args[0];
1450 const Value *Y = Args[1];
1451 const Value *Z = Args[2];
1452 TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW;
1453 TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX);
1454 TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY);
1455 TTI::OperandValueKind OpKindZ = TTI::getOperandInfo(Z, OpPropsZ);
1456 TTI::OperandValueKind OpKindBW = TTI::OK_UniformConstantValue;
1457 OpPropsBW = isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
1458 : TTI::OP_None;
1459 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
1460 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
1461 InstructionCost Cost = 0;
1462 Cost +=
1463 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
1464 Cost +=
1465 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
1466 Cost += thisT()->getArithmeticInstrCost(
1467 BinaryOperator::Shl, RetTy, CostKind, OpKindX, OpKindZ, OpPropsX);
1468 Cost += thisT()->getArithmeticInstrCost(
1469 BinaryOperator::LShr, RetTy, CostKind, OpKindY, OpKindZ, OpPropsY);
1470 // Non-constant shift amounts require a modulo.
1471 if (OpKindZ != TTI::OK_UniformConstantValue &&
1472 OpKindZ != TTI::OK_NonUniformConstantValue)
1473 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
1474 CostKind, OpKindZ, OpKindBW,
1475 OpPropsZ, OpPropsBW);
1476 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
1477 if (X != Y) {
1478 Type *CondTy = RetTy->getWithNewBitWidth(1);
1479 Cost +=
1480 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1481 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1482 Cost +=
1483 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1484 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1485 }
1486 return Cost;
1487 }
1488 }
1489
1490 // Assume that we need to scalarize this intrinsic.
1491 // Compute the scalarization overhead based on Args for a vector
1492 // intrinsic.
1493 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
1494 if (RetVF.isVector() && !RetVF.isScalable()) {
1495 ScalarizationCost = 0;
1496 if (!RetTy->isVoidTy())
1497 ScalarizationCost +=
1498 getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
1499 ScalarizationCost +=
1500 getOperandsScalarizationOverhead(Args, ICA.getArgTypes());
1501 }
1502
1503 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
1504 ScalarizationCost);
1505 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1506 }
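For reference, the funnel-shift expansion whose pieces are summed above behaves like this scalar sketch (standalone code for a 32-bit lane, not the actual intrinsic lowering); the '% BW' is what adds the URem cost for variable shift amounts, and the zero-amount special case is the extra icmp+select charged when X != Y.

#include <cstdint>

uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
  const uint32_t BW = 32;
  uint32_t Amt = Z % BW;                 // urem for non-constant Z
  if (Amt == 0)                          // icmp + select in the expansion
    return X;                            // avoids the out-of-range Y >> 32
  return (X << Amt) | (Y >> (BW - Amt)); // shl + lshr + or
}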
1507
1508 /// Get intrinsic cost based on argument types.
1509 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
1510 /// cost of scalarizing the arguments and the return value will be computed
1511 /// based on types.
1512 InstructionCost
1513 getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1514 TTI::TargetCostKind CostKind) {
1515 Intrinsic::ID IID = ICA.getID();
1516 Type *RetTy = ICA.getReturnType();
1517 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
1518 FastMathFlags FMF = ICA.getFlags();
1519 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
1520 bool SkipScalarizationCost = ICA.skipScalarizationCost();
1521
1522 VectorType *VecOpTy = nullptr;
1523 if (!Tys.empty()) {
1524 // The vector reduction operand is operand 0 except for fadd/fmul.
1525 // Their operand 0 is a scalar start value, so the vector op is operand 1.
1526 unsigned VecTyIndex = 0;
1527 if (IID == Intrinsic::vector_reduce_fadd ||
1528 IID == Intrinsic::vector_reduce_fmul)
1529 VecTyIndex = 1;
1530 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
1531 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
1532 }
1533
1534 // Library call cost - other than size, make it expensive.
1535 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
1536 SmallVector<unsigned, 2> ISDs;
1537 switch (IID) {
1538 default: {
1539 // Scalable vectors cannot be scalarized, so return Invalid.
1540 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1541 return isa<ScalableVectorType>(Ty);
1542 }))
1543 return InstructionCost::getInvalid();
1544
1545 // Assume that we need to scalarize this intrinsic.
1546 InstructionCost ScalarizationCost =
1547 SkipScalarizationCost ? ScalarizationCostPassed : 0;
1548 unsigned ScalarCalls = 1;
1549 Type *ScalarRetTy = RetTy;
1550 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1551 if (!SkipScalarizationCost)
1552 ScalarizationCost = getScalarizationOverhead(RetVTy, true, false);
1553 ScalarCalls = std::max(ScalarCalls,
1554 cast<FixedVectorType>(RetVTy)->getNumElements());
1555 ScalarRetTy = RetTy->getScalarType();
1556 }
1557 SmallVector<Type *, 4> ScalarTys;
1558 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1559 Type *Ty = Tys[i];
1560 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1561 if (!SkipScalarizationCost)
1562 ScalarizationCost += getScalarizationOverhead(VTy, false, true);
1563 ScalarCalls = std::max(ScalarCalls,
1564 cast<FixedVectorType>(VTy)->getNumElements());
1565 Ty = Ty->getScalarType();
1566 }
1567 ScalarTys.push_back(Ty);
1568 }
1569 if (ScalarCalls == 1)
1570 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
1571
1572 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
1573 InstructionCost ScalarCost =
1574 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
1575
1576 return ScalarCalls * ScalarCost + ScalarizationCost;
1577 }
1578 // Look for intrinsics that can be lowered directly or turned into a scalar
1579 // intrinsic call.
1580 case Intrinsic::sqrt:
1581 ISDs.push_back(ISD::FSQRT);
1582 break;
1583 case Intrinsic::sin:
1584 ISDs.push_back(ISD::FSIN);
1585 break;
1586 case Intrinsic::cos:
1587 ISDs.push_back(ISD::FCOS);
1588 break;
1589 case Intrinsic::exp:
1590 ISDs.push_back(ISD::FEXP);
1591 break;
1592 case Intrinsic::exp2:
1593 ISDs.push_back(ISD::FEXP2);
1594 break;
1595 case Intrinsic::log:
1596 ISDs.push_back(ISD::FLOG);
1597 break;
1598 case Intrinsic::log10:
1599 ISDs.push_back(ISD::FLOG10);
1600 break;
1601 case Intrinsic::log2:
1602 ISDs.push_back(ISD::FLOG2);
1603 break;
1604 case Intrinsic::fabs:
1605 ISDs.push_back(ISD::FABS);
1606 break;
1607 case Intrinsic::canonicalize:
1608 ISDs.push_back(ISD::FCANONICALIZE);
1609 break;
1610 case Intrinsic::minnum:
1611 ISDs.push_back(ISD::FMINNUM);
1612 break;
1613 case Intrinsic::maxnum:
1614 ISDs.push_back(ISD::FMAXNUM);
1615 break;
1616 case Intrinsic::minimum:
1617 ISDs.push_back(ISD::FMINIMUM);
1618 break;
1619 case Intrinsic::maximum:
1620 ISDs.push_back(ISD::FMAXIMUM);
1621 break;
1622 case Intrinsic::copysign:
1623 ISDs.push_back(ISD::FCOPYSIGN);
1624 break;
1625 case Intrinsic::floor:
1626 ISDs.push_back(ISD::FFLOOR);
1627 break;
1628 case Intrinsic::ceil:
1629 ISDs.push_back(ISD::FCEIL);
1630 break;
1631 case Intrinsic::trunc:
1632 ISDs.push_back(ISD::FTRUNC);
1633 break;
1634 case Intrinsic::nearbyint:
1635 ISDs.push_back(ISD::FNEARBYINT);
1636 break;
1637 case Intrinsic::rint:
1638 ISDs.push_back(ISD::FRINT);
1639 break;
1640 case Intrinsic::round:
1641 ISDs.push_back(ISD::FROUND);
1642 break;
1643 case Intrinsic::roundeven:
1644 ISDs.push_back(ISD::FROUNDEVEN);
1645 break;
1646 case Intrinsic::pow:
1647 ISDs.push_back(ISD::FPOW);
1648 break;
1649 case Intrinsic::fma:
1650 ISDs.push_back(ISD::FMA);
1651 break;
1652 case Intrinsic::fmuladd:
1653 ISDs.push_back(ISD::FMA);
1654 break;
1655 case Intrinsic::experimental_constrained_fmuladd:
1656 ISDs.push_back(ISD::STRICT_FMA);
1657 break;
1658 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
1659 case Intrinsic::lifetime_start:
1660 case Intrinsic::lifetime_end:
1661 case Intrinsic::sideeffect:
1662 case Intrinsic::pseudoprobe:
1663 case Intrinsic::arithmetic_fence:
1664 return 0;
1665 case Intrinsic::masked_store: {
1666 Type *Ty = Tys[0];
1667 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
1668 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
1669 CostKind);
1670 }
1671 case Intrinsic::masked_load: {
1672 Type *Ty = RetTy;
1673 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
1674 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
1675 CostKind);
1676 }
1677 case Intrinsic::vector_reduce_add:
1678 return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
1679 None, CostKind);
1680 case Intrinsic::vector_reduce_mul:
1681 return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
1682 None, CostKind);
1683 case Intrinsic::vector_reduce_and:
1684 return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
1685 None, CostKind);
1686 case Intrinsic::vector_reduce_or:
1687 return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy, None,
1688 CostKind);
1689 case Intrinsic::vector_reduce_xor:
1690 return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
1691 None, CostKind);
1692 case Intrinsic::vector_reduce_fadd:
1693 return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
1694 FMF, CostKind);
1695 case Intrinsic::vector_reduce_fmul:
1696 return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
1697 FMF, CostKind);
1698 case Intrinsic::vector_reduce_smax:
1699 case Intrinsic::vector_reduce_smin:
1700 case Intrinsic::vector_reduce_fmax:
1701 case Intrinsic::vector_reduce_fmin:
1702 return thisT()->getMinMaxReductionCost(
1703 VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
1704 /*IsUnsigned=*/false, CostKind);
1705 case Intrinsic::vector_reduce_umax:
1706 case Intrinsic::vector_reduce_umin:
1707 return thisT()->getMinMaxReductionCost(
1708 VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
1709 /*IsUnsigned=*/true, CostKind);
1710 case Intrinsic::abs:
1711 case Intrinsic::smax:
1712 case Intrinsic::smin:
1713 case Intrinsic::umax:
1714 case Intrinsic::umin: {
1715 // abs(X) = select(icmp(X,0),X,sub(0,X))
1716 // minmax(X,Y) = select(icmp(X,Y),X,Y)
1717 Type *CondTy = RetTy->getWithNewBitWidth(1);
1718 InstructionCost Cost = 0;
1719 // TODO: Ideally getCmpSelInstrCost would accept an icmp condition code.
1720 Cost +=
1721 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1722 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1723 Cost +=
1724 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1725 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1726 // TODO: Should we add an OperandValueProperties::OP_Zero property?
1727 if (IID == Intrinsic::abs)
1728 Cost += thisT()->getArithmeticInstrCost(
1729 BinaryOperator::Sub, RetTy, CostKind, TTI::OK_UniformConstantValue);
1730 return Cost;
1731 }
1732 case Intrinsic::sadd_sat:
1733 case Intrinsic::ssub_sat: {
1734 Type *CondTy = RetTy->getWithNewBitWidth(1);
1735
1736 Type *OpTy = StructType::create({RetTy, CondTy});
1737 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
1738 ? Intrinsic::sadd_with_overflow
1739 : Intrinsic::ssub_with_overflow;
1740
1741 // SatMax -> Overflow && SumDiff < 0
1742 // SatMin -> Overflow && SumDiff >= 0
1743 InstructionCost Cost = 0;
1744 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
1745 nullptr, ScalarizationCostPassed);
1746 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
1747 Cost +=
1748 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1749 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1750 Cost += 2 * thisT()->getCmpSelInstrCost(
1751 BinaryOperator::Select, RetTy, CondTy,
1752 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1753 return Cost;
1754 }
1755 case Intrinsic::uadd_sat:
1756 case Intrinsic::usub_sat: {
1757 Type *CondTy = RetTy->getWithNewBitWidth(1);
1758
1759 Type *OpTy = StructType::create({RetTy, CondTy});
1760 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
1761 ? Intrinsic::uadd_with_overflow
1762 : Intrinsic::usub_with_overflow;
1763
1764 InstructionCost Cost = 0;
1765 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
1766 nullptr, ScalarizationCostPassed);
1767 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
1768 Cost +=
1769 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1770 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1771 return Cost;
1772 }
1773 case Intrinsic::smul_fix:
1774 case Intrinsic::umul_fix: {
1775 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
1776 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
1777
1778 unsigned ExtOp =
1779 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
1780 TTI::CastContextHint CCH = TTI::CastContextHint::None;
1781
1782 InstructionCost Cost = 0;
1783 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
1784 Cost +=
1785 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
1786 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
1787 CCH, CostKind);
1788 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
1789 CostKind, TTI::OK_AnyValue,
1790 TTI::OK_UniformConstantValue);
1791 Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
1792 TTI::OK_AnyValue,
1793 TTI::OK_UniformConstantValue);
1794 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
1795 return Cost;
1796 }
1797 case Intrinsic::sadd_with_overflow:
1798 case Intrinsic::ssub_with_overflow: {
1799 Type *SumTy = RetTy->getContainedType(0);
1800 Type *OverflowTy = RetTy->getContainedType(1);
1801 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
1802 ? BinaryOperator::Add
1803 : BinaryOperator::Sub;
1804
1805 // LHSSign -> LHS >= 0
1806 // RHSSign -> RHS >= 0
1807 // SumSign -> Sum >= 0
1808 //
1809 // Add:
1810 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
1811 // Sub:
1812 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
1813 InstructionCost Cost = 0;
1814 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
1815 Cost += 3 * thisT()->getCmpSelInstrCost(
1816 Instruction::ICmp, SumTy, OverflowTy,
1817 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1818 Cost += 2 * thisT()->getCmpSelInstrCost(
1819 Instruction::Select, OverflowTy, OverflowTy,
1820 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1821 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, OverflowTy,
1822 CostKind);
1823 return Cost;
1824 }
1825 case Intrinsic::uadd_with_overflow:
1826 case Intrinsic::usub_with_overflow: {
1827 Type *SumTy = RetTy->getContainedType(0);
1828 Type *OverflowTy = RetTy->getContainedType(1);
1829 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
1830 ? BinaryOperator::Add
1831 : BinaryOperator::Sub;
1832
1833 InstructionCost Cost = 0;
1834 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
1835 Cost +=
1836 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
1837 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1838 return Cost;
1839 }
1840 case Intrinsic::smul_with_overflow:
1841 case Intrinsic::umul_with_overflow: {
1842 Type *MulTy = RetTy->getContainedType(0);
1843 Type *OverflowTy = RetTy->getContainedType(1);
1844 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
1845 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
1846
1847 unsigned ExtOp =
1848 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
1849 TTI::CastContextHint CCH = TTI::CastContextHint::None;
1850
1851 InstructionCost Cost = 0;
1852 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
1853 Cost +=
1854 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
1855 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
1856 CCH, CostKind);
1857 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, MulTy,
1858 CostKind, TTI::OK_AnyValue,
1859 TTI::OK_UniformConstantValue);
1860
1861 if (IID == Intrinsic::smul_with_overflow)
1862 Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
1863 CostKind, TTI::OK_AnyValue,
1864 TTI::OK_UniformConstantValue);
1865
1866 Cost +=
1867 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, MulTy, OverflowTy,
1868 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1869 return Cost;
1870 }
1871 case Intrinsic::ctpop:
1872 ISDs.push_back(ISD::CTPOP);
1873 // In case of legalization use TCC_Expensive. This is cheaper than a
1874 // library call but still not a cheap instruction.
1875 SingleCallCost = TargetTransformInfo::TCC_Expensive;
1876 break;
1877 case Intrinsic::ctlz:
1878 ISDs.push_back(ISD::CTLZ);
1879 break;
1880 case Intrinsic::cttz:
1881 ISDs.push_back(ISD::CTTZ);
1882 break;
1883 case Intrinsic::bswap:
1884 ISDs.push_back(ISD::BSWAP);
1885 break;
1886 case Intrinsic::bitreverse:
1887 ISDs.push_back(ISD::BITREVERSE);
1888 break;
1889 }
1890
1891 const TargetLoweringBase *TLI = getTLI();
1892 std::pair<InstructionCost, MVT> LT =
1893 TLI->getTypeLegalizationCost(DL, RetTy);
1894
1895 SmallVector<InstructionCost, 2> LegalCost;
1896 SmallVector<InstructionCost, 2> CustomCost;
1897 for (unsigned ISD : ISDs) {
1898 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1899 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
1900 TLI->isFAbsFree(LT.second)) {
1901 return 0;
1902 }
1903
1904 // The operation is legal. Assume it costs 1.
1905 // If the type is split to multiple registers, assume that there is some
1906 // overhead to this.
1907 // TODO: Once we have extract/insert subvector cost we need to use them.
1908 if (LT.first > 1)
1909 LegalCost.push_back(LT.first * 2);
1910 else
1911 LegalCost.push_back(LT.first * 1);
1912 } else if (!TLI->isOperationExpand(ISD, LT.second)) {
1913 // If the operation is custom lowered then assume
1914 // that the code is twice as expensive.
1915 CustomCost.push_back(LT.first * 2);
1916 }
1917 }
1918
1919 auto *MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
1920 if (MinLegalCostI != LegalCost.end())
1921 return *MinLegalCostI;
1922
1923 auto MinCustomCostI =
1924 std::min_element(CustomCost.begin(), CustomCost.end());
1925 if (MinCustomCostI != CustomCost.end())
1926 return *MinCustomCostI;
1927
1928 // If we can't lower fmuladd into an FMA estimate the cost as a floating
1929 // point mul followed by an add.
1930 if (IID == Intrinsic::fmuladd)
1931 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
1932 CostKind) +
1933 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
1934 CostKind);
1935 if (IID == Intrinsic::experimental_constrained_fmuladd) {
1936 IntrinsicCostAttributes FMulAttrs(
1937 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
1938 IntrinsicCostAttributes FAddAttrs(
1939 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
1940 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
1941 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
1942 }
1943
1944 // Else, assume that we need to scalarize this intrinsic. For math builtins
1945 // this will emit a costly libcall, adding call overhead and spills. Make it
1946 // very expensive.
1947 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1948 // Scalable vectors cannot be scalarized, so return Invalid.
1949 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1950 return isa<ScalableVectorType>(Ty);
1951 }))
1952 return InstructionCost::getInvalid();
1953
1954 InstructionCost ScalarizationCost =
1955 SkipScalarizationCost ? ScalarizationCostPassed
1956 : getScalarizationOverhead(RetVTy, true, false);
1957
1958 unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
1959 SmallVector<Type *, 4> ScalarTys;
1960 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1961 Type *Ty = Tys[i];
1962 if (Ty->isVectorTy())
1963 Ty = Ty->getScalarType();
1964 ScalarTys.push_back(Ty);
1965 }
1966 IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
1967 InstructionCost ScalarCost =
1968 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
1969 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1970 if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
1971 if (!ICA.skipScalarizationCost())
1972 ScalarizationCost += getScalarizationOverhead(VTy, false, true);
1973 ScalarCalls = std::max(ScalarCalls,
1974 cast<FixedVectorType>(VTy)->getNumElements());
1975 }
1976 }
1977 return ScalarCalls * ScalarCost + ScalarizationCost;
1978 }
1979
1980 // This is going to be turned into a library call, make it expensive.
1981 return SingleCallCost;
1982 }
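As one example of the decompositions priced above, uadd.sat is costed as an add-with-overflow plus a select; a scalar reference (standalone sketch, 32-bit) looks like this:

#include <cstdint>
#include <limits>

uint32_t uaddSat32(uint32_t A, uint32_t B) {
  uint32_t Sum = A + B;          // the uadd.with.overflow result value
  bool Overflow = Sum < A;       // the uadd.with.overflow carry flag
  return Overflow ? std::numeric_limits<uint32_t>::max() : Sum; // the select
}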
1983
1984 /// Compute a cost of the given call instruction.
1985 ///
1986 /// Compute the cost of calling function F with return type RetTy and
1987 /// argument types Tys. F might be nullptr, in this case the cost of an
1988 /// arbitrary call with the specified signature will be returned.
1989 /// This is used, for instance, when we estimate call of a vector
1990 /// counterpart of the given function.
1991 /// \param F Called function, might be nullptr.
1992 /// \param RetTy Return value types.
1993 /// \param Tys Argument types.
1994 /// \returns The cost of Call instruction.
1995 InstructionCost
1996 getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1997 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) {
1998 return 10;
1999 }
2000
2001 unsigned getNumberOfParts(Type *Tp) {
2002 std::pair<InstructionCost, MVT> LT =
2003 getTLI()->getTypeLegalizationCost(DL, Tp);
2004 return *LT.first.getValue();
2005 }
2006
2007 InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
2008 const SCEV *) {
2009 return 0;
2010 }
2011
2012 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
2013 /// We're assuming that reduction operations are performed in the following way:
2014 ///
2015 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
2016 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
2017 /// \----------------v-------------/ \----------v------------/
2018 /// n/2 elements n/2 elements
2019 /// %red1 = op <n x t> %val, <n x t> val1
2020 /// After this operation we have a vector %red1 where only the first n/2
2021 /// elements are meaningful, the second n/2 elements are undefined and can be
2022 /// dropped. All other operations are actually working with the vector of
2023 /// length n/2, not n, though the real vector length is still n.
2024 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
2025 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
2026 /// \----------------v-------------/ \----------v------------/
2027 /// n/4 elements 3*n/4 elements
2028 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
2029 /// length n/2, the resulting vector has length n/4 etc.
2030 ///
2031 /// The cost model should take into account that the actual length of the
2032 /// vector is reduced on each iteration.
2033 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
2034 TTI::TargetCostKind CostKind) {
2035 Type *ScalarTy = Ty->getElementType();
2036 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2037 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
2038 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
2039 NumVecElts >= 2) {
2040 // Or reduction for i1 is represented as:
2041 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2042 // %res = cmp ne iReduxWidth %val, 0
2043 // And reduction for i1 is represented as:
2044 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2045 // %res = cmp eq iReduxWidth %val, 11111
2046 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
2047 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
2048 TTI::CastContextHint::None, CostKind) +
2049 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
2050 CmpInst::makeCmpResultType(ValTy),
2051 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2052 }
2053 unsigned NumReduxLevels = Log2_32(NumVecElts);
2054 InstructionCost ArithCost = 0;
2055 InstructionCost ShuffleCost = 0;
2056 std::pair<InstructionCost, MVT> LT =
2057 thisT()->getTLI()->getTypeLegalizationCost(DL, Ty);
2058 unsigned LongVectorCount = 0;
2059 unsigned MVTLen =
2060 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2061 while (NumVecElts > MVTLen) {
2062 NumVecElts /= 2;
2063 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2064 ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, None,
2065 NumVecElts, SubTy);
2066 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
2067 Ty = SubTy;
2068 ++LongVectorCount;
2069 }
2070
2071 NumReduxLevels -= LongVectorCount;
2072
2073 // The minimal length of the vector is limited by the real length of vector
2074 // operations performed on the current platform. That's why several final
2075 // reduction operations are performed on the vectors with the same
2076 // architecture-dependent length.
2077
2078 // By default reductions need one shuffle per reduction level.
2079 ShuffleCost += NumReduxLevels * thisT()->getShuffleCost(
2080 TTI::SK_PermuteSingleSrc, Ty, None, 0, Ty);
2081 ArithCost += NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty);
2082 return ShuffleCost + ArithCost +
2083 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
2084 }
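The halving scheme described in the doc comment corresponds to the following standalone sketch (plain integers, element count assumed to be a power of two); each loop iteration stands for one shuffle-plus-op level, and the final read of element 0 is the extractelement charged at the end.

#include <vector>

int treeReduceAdd(std::vector<int> V) {
  size_t N = V.size();            // assumed power of two
  while (N > 1) {
    N /= 2;                       // one reduction level
    for (size_t I = 0; I < N; ++I)
      V[I] += V[I + N];           // upper half folded onto the lower half
  }
  return V[0];                    // final extractelement
}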
2085
2086 /// Try to calculate the cost of performing strict (in-order) reductions,
2087 /// which involves doing a sequence of floating point additions in lane
2088 /// order, starting with an initial value. For example, consider a scalar
2089 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
2090 ///
2091 /// Vector = <float %v0, float %v1, float %v2, float %v3>
2092 ///
2093 /// %add1 = %InitVal + %v0
2094 /// %add2 = %add1 + %v1
2095 /// %add3 = %add2 + %v2
2096 /// %add4 = %add3 + %v3
2097 ///
2098 /// As a simple estimate we can say the cost of such a reduction is 4 times
2099 /// the cost of a scalar FP addition. We can only estimate the costs for
2100 /// fixed-width vectors here because for scalable vectors we do not know the
2101 /// runtime number of operations.
2102 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
2103 TTI::TargetCostKind CostKind) {
2104 // Targets must implement a default value for the scalable case, since
2105 // we don't know how many lanes the vector has.
2106 if (isa<ScalableVectorType>(Ty))
2107 return InstructionCost::getInvalid();
2108
2109 auto *VTy = cast<FixedVectorType>(Ty);
2110 InstructionCost ExtractCost =
2111 getScalarizationOverhead(VTy, /*Insert=*/false, /*Extract=*/true);
2112 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
2113 Opcode, VTy->getElementType(), CostKind);
2114 ArithCost *= VTy->getNumElements();
2115
2116 return ExtractCost + ArithCost;
2117 }
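A scalar reference for the in-order reduction above (standalone sketch): one fadd per lane, seeded with the initial value and never reassociated, which is why the arithmetic cost is simply the element count times a scalar fadd plus the per-lane extracts.

#include <vector>

float orderedFAdd(float InitVal, const std::vector<float> &V) {
  float Acc = InitVal;
  for (float X : V)   // %add1 = %InitVal + %v0, %add2 = %add1 + %v1, ...
    Acc += X;
  return Acc;
}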
2118
2119 InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2120 Optional<FastMathFlags> FMF,
2121 TTI::TargetCostKind CostKind) {
2122 if (TTI::requiresOrderedReduction(FMF))
2123 return getOrderedReductionCost(Opcode, Ty, CostKind);
2124 return getTreeReductionCost(Opcode, Ty, CostKind);
2125 }
2126
2127 /// Try to calculate op costs for min/max reduction operations.
2128 /// \param CondTy Conditional type for the Select instruction.
2129 InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
2130 bool IsUnsigned,
2131 TTI::TargetCostKind CostKind) {
2132 Type *ScalarTy = Ty->getElementType();
2133 Type *ScalarCondTy = CondTy->getElementType();
2134 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2135 unsigned NumReduxLevels = Log2_32(NumVecElts);
2136 unsigned CmpOpcode;
2137 if (Ty->isFPOrFPVectorTy()) {
2138 CmpOpcode = Instruction::FCmp;
2139 } else {
2140 assert(Ty->isIntOrIntVectorTy() &&
2141        "expecting floating point or integer type for min/max reduction");
2142 CmpOpcode = Instruction::ICmp;
2143 }
2144 InstructionCost MinMaxCost = 0;
2145 InstructionCost ShuffleCost = 0;
2146 std::pair<InstructionCost, MVT> LT =
2147 thisT()->getTLI()->getTypeLegalizationCost(DL, Ty);
2148 unsigned LongVectorCount = 0;
2149 unsigned MVTLen =
2150 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2151 while (NumVecElts > MVTLen) {
2152 NumVecElts /= 2;
2153 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2154 CondTy = FixedVectorType::get(ScalarCondTy, NumVecElts);
2155
2156 ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, None,
2157 NumVecElts, SubTy);
2158 MinMaxCost +=
2159 thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy,
2160 CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2161 thisT()->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
2162 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2163 Ty = SubTy;
2164 ++LongVectorCount;
2165 }
2166
2167 NumReduxLevels -= LongVectorCount;
2168
2169 // The minimal length of the vector is limited by the real length of vector
2170 // operations performed on the current platform. That's why several final
2171 // reduction operations are performed on the vectors with the same
2172 // architecture-dependent length.
2173 ShuffleCost += NumReduxLevels * thisT()->getShuffleCost(
2174 TTI::SK_PermuteSingleSrc, Ty, None, 0, Ty);
2175 MinMaxCost +=
2176 NumReduxLevels *
2177 (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy,
2178 CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2179 thisT()->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
2180 CmpInst::BAD_ICMP_PREDICATE, CostKind));
2181 // The last min/max should be in vector registers and we counted it above.
2182 // So just need a single extractelement.
2183 return ShuffleCost + MinMaxCost +
2184 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
2185 }
2186
2187 InstructionCost getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
2188 Type *ResTy, VectorType *Ty,
2189 TTI::TargetCostKind CostKind) {
2190 // Without any native support, this is equivalent to the cost of
2191 // vecreduce.add(ext) or if IsMLA vecreduce.add(mul(ext, ext))
2192 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2193 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
2194 Instruction::Add, ExtTy, None, CostKind);
2195 InstructionCost MulCost = 0;
2196 InstructionCost ExtCost = thisT()->getCastInstrCost(
2197 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2198 TTI::CastContextHint::None, CostKind);
2199 if (IsMLA) {
2200 MulCost =
2201 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2202 ExtCost *= 2;
2203 }
2204
2205 return RedCost + MulCost + ExtCost;
2206 }
2207
2208 InstructionCost getVectorSplitCost() { return 1; }
2209
2210 /// @}
2211};
2212
2213/// Concrete BasicTTIImpl that can be used if no further customization
2214/// is needed.
2215class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
2216 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
2217
2218 friend class BasicTTIImplBase<BasicTTIImpl>;
2219
2220 const TargetSubtargetInfo *ST;
2221 const TargetLoweringBase *TLI;
2222
2223 const TargetSubtargetInfo *getST() const { return ST; }
2224 const TargetLoweringBase *getTLI() const { return TLI; }
2225
2226public:
2227 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
2228};
2229
2230} // end namespace llvm
2231
2232#endif // LLVM_CODEGEN_BASICTTIIMPL_H

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/Support/MachineValueType.h

1//===- Support/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the set of machine-level target independent types which
10// legal values in the code generator use.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_MACHINEVALUETYPE_H
15#define LLVM_SUPPORT_MACHINEVALUETYPE_H
16
17#include "llvm/ADT/Sequence.h"
18#include "llvm/ADT/iterator_range.h"
19#include "llvm/Support/ErrorHandling.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/TypeSize.h"
22#include <cassert>
23
24namespace llvm {
25
26 class Type;
27
28 /// Machine Value Type. Every type that is supported natively by some
29 /// processor targeted by LLVM occurs here. This means that any legal value
30 /// type can be represented by an MVT.
31 class MVT {
32 public:
33 enum SimpleValueType : uint8_t {
34 // clang-format off
35
36 // Simple value types that aren't explicitly part of this enumeration
37 // are considered extended value types.
38 INVALID_SIMPLE_VALUE_TYPE = 0,
39
40 // If you change this numbering, you must change the values in
41 // ValueTypes.td as well!
42 Other = 1, // This is a non-standard value
43 i1 = 2, // This is a 1 bit integer value
44 i8 = 3, // This is an 8 bit integer value
45 i16 = 4, // This is a 16 bit integer value
46 i32 = 5, // This is a 32 bit integer value
47 i64 = 6, // This is a 64 bit integer value
48 i128 = 7, // This is a 128 bit integer value
49
50 FIRST_INTEGER_VALUETYPE = i1,
51 LAST_INTEGER_VALUETYPE = i128,
52
53 bf16 = 8, // This is a 16 bit brain floating point value
54 f16 = 9, // This is a 16 bit floating point value
55 f32 = 10, // This is a 32 bit floating point value
56 f64 = 11, // This is a 64 bit floating point value
57 f80 = 12, // This is an 80 bit floating point value
58 f128 = 13, // This is a 128 bit floating point value
59 ppcf128 = 14, // This is a PPC 128-bit floating point value
60
61 FIRST_FP_VALUETYPE = bf16,
62 LAST_FP_VALUETYPE = ppcf128,
63
64 v1i1 = 15, // 1 x i1
65 v2i1 = 16, // 2 x i1
66 v4i1 = 17, // 4 x i1
67 v8i1 = 18, // 8 x i1
68 v16i1 = 19, // 16 x i1
69 v32i1 = 20, // 32 x i1
70 v64i1 = 21, // 64 x i1
71 v128i1 = 22, // 128 x i1
72 v256i1 = 23, // 256 x i1
73 v512i1 = 24, // 512 x i1
74 v1024i1 = 25, // 1024 x i1
75
76 v1i8 = 26, // 1 x i8
77 v2i8 = 27, // 2 x i8
78 v4i8 = 28, // 4 x i8
79 v8i8 = 29, // 8 x i8
80 v16i8 = 30, // 16 x i8
81 v32i8 = 31, // 32 x i8
82 v64i8 = 32, // 64 x i8
83 v128i8 = 33, // 128 x i8
84 v256i8 = 34, // 256 x i8
85 v512i8 = 35, // 512 x i8
86 v1024i8 = 36, // 1024 x i8
87
88 v1i16 = 37, // 1 x i16
89 v2i16 = 38, // 2 x i16
90 v3i16 = 39, // 3 x i16
91 v4i16 = 40, // 4 x i16
92 v8i16 = 41, // 8 x i16
93 v16i16 = 42, // 16 x i16
94 v32i16 = 43, // 32 x i16
95 v64i16 = 44, // 64 x i16
96 v128i16 = 45, // 128 x i16
97 v256i16 = 46, // 256 x i16
98 v512i16 = 47, // 512 x i16
99
100 v1i32 = 48, // 1 x i32
101 v2i32 = 49, // 2 x i32
102 v3i32 = 50, // 3 x i32
103 v4i32 = 51, // 4 x i32
104 v5i32 = 52, // 5 x i32
105 v6i32 = 53, // 6 x i32
106 v7i32 = 54, // 7 x i32
107 v8i32 = 55, // 8 x i32
108 v16i32 = 56, // 16 x i32
109 v32i32 = 57, // 32 x i32
110 v64i32 = 58, // 64 x i32
111 v128i32 = 59, // 128 x i32
112 v256i32 = 60, // 256 x i32
113 v512i32 = 61, // 512 x i32
114 v1024i32 = 62, // 1024 x i32
115 v2048i32 = 63, // 2048 x i32
116
117 v1i64 = 64, // 1 x i64
118 v2i64 = 65, // 2 x i64
119 v3i64 = 66, // 3 x i64
120 v4i64 = 67, // 4 x i64
121 v8i64 = 68, // 8 x i64
122 v16i64 = 69, // 16 x i64
123 v32i64 = 70, // 32 x i64
124 v64i64 = 71, // 64 x i64
125 v128i64 = 72, // 128 x i64
126 v256i64 = 73, // 256 x i64
127
128 v1i128 = 74, // 1 x i128
129
130 FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
131 LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128,
132
133 v1f16 = 75, // 1 x f16
134 v2f16 = 76, // 2 x f16
135 v3f16 = 77, // 3 x f16
136 v4f16 = 78, // 4 x f16
137 v8f16 = 79, // 8 x f16
138 v16f16 = 80, // 16 x f16
139 v32f16 = 81, // 32 x f16
140 v64f16 = 82, // 64 x f16
141 v128f16 = 83, // 128 x f16
142 v256f16 = 84, // 256 x f16
143 v512f16 = 85, // 512 x f16
144
145 v2bf16 = 86, // 2 x bf16
146 v3bf16 = 87, // 3 x bf16
147 v4bf16 = 88, // 4 x bf16
148 v8bf16 = 89, // 8 x bf16
149 v16bf16 = 90, // 16 x bf16
150 v32bf16 = 91, // 32 x bf16
151 v64bf16 = 92, // 64 x bf16
152 v128bf16 = 93, // 128 x bf16
153
154 v1f32 = 94, // 1 x f32
155 v2f32 = 95, // 2 x f32
156 v3f32 = 96, // 3 x f32
157 v4f32 = 97, // 4 x f32
158 v5f32 = 98, // 5 x f32
159 v6f32 = 99, // 6 x f32
160 v7f32 = 100, // 7 x f32
161 v8f32 = 101, // 8 x f32
162 v16f32 = 102, // 16 x f32
163 v32f32 = 103, // 32 x f32
164 v64f32 = 104, // 64 x f32
165 v128f32 = 105, // 128 x f32
166 v256f32 = 106, // 256 x f32
167 v512f32 = 107, // 512 x f32
168 v1024f32 = 108, // 1024 x f32
169 v2048f32 = 109, // 2048 x f32
170
171 v1f64 = 110, // 1 x f64
172 v2f64 = 111, // 2 x f64
173 v3f64 = 112, // 3 x f64
174 v4f64 = 113, // 4 x f64
175 v8f64 = 114, // 8 x f64
176 v16f64 = 115, // 16 x f64
177 v32f64 = 116, // 32 x f64
178 v64f64 = 117, // 64 x f64
179 v128f64 = 118, // 128 x f64
180 v256f64 = 119, // 256 x f64
181
182 FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v1f16,
183 LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v256f64,
184
185 FIRST_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
186 LAST_FIXEDLEN_VECTOR_VALUETYPE = v256f64,
187
188 nxv1i1 = 120, // n x 1 x i1
189 nxv2i1 = 121, // n x 2 x i1
190 nxv4i1 = 122, // n x 4 x i1
191 nxv8i1 = 123, // n x 8 x i1
192 nxv16i1 = 124, // n x 16 x i1
193 nxv32i1 = 125, // n x 32 x i1
194 nxv64i1 = 126, // n x 64 x i1
195
196 nxv1i8 = 127, // n x 1 x i8
197 nxv2i8 = 128, // n x 2 x i8
198 nxv4i8 = 129, // n x 4 x i8
199 nxv8i8 = 130, // n x 8 x i8
200 nxv16i8 = 131, // n x 16 x i8
201 nxv32i8 = 132, // n x 32 x i8
202 nxv64i8 = 133, // n x 64 x i8
203
204 nxv1i16 = 134, // n x 1 x i16
205 nxv2i16 = 135, // n x 2 x i16
206 nxv4i16 = 136, // n x 4 x i16
207 nxv8i16 = 137, // n x 8 x i16
208 nxv16i16 = 138, // n x 16 x i16
209 nxv32i16 = 139, // n x 32 x i16
210
211 nxv1i32 = 140, // n x 1 x i32
212 nxv2i32 = 141, // n x 2 x i32
213 nxv4i32 = 142, // n x 4 x i32
214 nxv8i32 = 143, // n x 8 x i32
215 nxv16i32 = 144, // n x 16 x i32
216 nxv32i32 = 145, // n x 32 x i32
217
218 nxv1i64 = 146, // n x 1 x i64
219 nxv2i64 = 147, // n x 2 x i64
220 nxv4i64 = 148, // n x 4 x i64
221 nxv8i64 = 149, // n x 8 x i64
222 nxv16i64 = 150, // n x 16 x i64
223 nxv32i64 = 151, // n x 32 x i64
224
225 FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
226 LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv32i64,
227
228 nxv1f16 = 152, // n x 1 x f16
229 nxv2f16 = 153, // n x 2 x f16
230 nxv4f16 = 154, // n x 4 x f16
231 nxv8f16 = 155, // n x 8 x f16
232 nxv16f16 = 156, // n x 16 x f16
233 nxv32f16 = 157, // n x 32 x f16
234
235 nxv1bf16 = 158, // n x 1 x bf16
236 nxv2bf16 = 159, // n x 2 x bf16
237 nxv4bf16 = 160, // n x 4 x bf16
238 nxv8bf16 = 161, // n x 8 x bf16
239
240 nxv1f32 = 162, // n x 1 x f32
241 nxv2f32 = 163, // n x 2 x f32
242 nxv4f32 = 164, // n x 4 x f32
243 nxv8f32 = 165, // n x 8 x f32
244 nxv16f32 = 166, // n x 16 x f32
245
246 nxv1f64 = 167, // n x 1 x f64
247 nxv2f64 = 168, // n x 2 x f64
248 nxv4f64 = 169, // n x 4 x f64
249 nxv8f64 = 170, // n x 8 x f64
250
251 FIRST_FP_SCALABLE_VECTOR_VALUETYPE = nxv1f16,
252 LAST_FP_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
253
254 FIRST_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
255 LAST_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
256
257 FIRST_VECTOR_VALUETYPE = v1i1,
258 LAST_VECTOR_VALUETYPE = nxv8f64,
259
260 x86mmx = 171, // This is an X86 MMX value
261
262 Glue = 172, // This glues nodes together during pre-RA sched
263
264 isVoid = 173, // This has no value
265
266 Untyped = 174, // This value takes a register, but has
267 // unspecified type. The register class
268 // will be determined by the opcode.
269
270 funcref = 175, // WebAssembly's funcref type
271 externref = 176, // WebAssembly's externref type
272 x86amx = 177, // This is an X86 AMX value
273 i64x8 = 178, // 8 Consecutive GPRs (AArch64)
274
275 FIRST_VALUETYPE = 1, // This is always the beginning of the list.
276 LAST_VALUETYPE = i64x8, // This always remains at the end of the list.
277 VALUETYPE_SIZE = LAST_VALUETYPE + 1,
278
279 // This is the current maximum for LAST_VALUETYPE.
280 // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
281 // This value must be a multiple of 32.
282 MAX_ALLOWED_VALUETYPE = 192,
283
284 // A value of type llvm::TokenTy
285 token = 248,
286
287 // This is MDNode or MDString.
288 Metadata = 249,
289
290 // An int value the size of the pointer of the current
291 // target to any address space. This must only be used internal to
292 // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
293 iPTRAny = 250,
294
295 // A vector with any length and element size. This is used
296 // for intrinsics that have overloadings based on vector types.
297 // This is only for tblgen's consumption!
298 vAny = 251,
299
300 // Any floating-point or vector floating-point value. This is used
301 // for intrinsics that have overloadings based on floating-point types.
302 // This is only for tblgen's consumption!
303 fAny = 252,
304
305 // An integer or vector integer value of any bit width. This is
306 // used for intrinsics that have overloadings based on integer bit widths.
307 // This is only for tblgen's consumption!
308 iAny = 253,
309
310 // An int value the size of the pointer of the current
311 // target. This should only be used internal to tblgen!
312 iPTR = 254,
313
314 // Any type. This is used for intrinsics that have overloadings.
315 // This is only for tblgen's consumption!
316 Any = 255
317
318 // clang-format on
319 };
320
321 SimpleValueType SimpleTy = INVALID_SIMPLE_VALUE_TYPE;
322
323 constexpr MVT() = default;
324 constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}
325
326 bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
327 bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
328 bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
329 bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
330 bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
331 bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
332
333 /// Return true if this is a valid simple valuetype.
334 bool isValid() const {
335 return (SimpleTy >= MVT::FIRST_VALUETYPE &&
336 SimpleTy <= MVT::LAST_VALUETYPE);
337 }
338
339 /// Return true if this is a FP or a vector FP type.
340 bool isFloatingPoint() const {
341 return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
342 SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
343 (SimpleTy >= MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE &&
344 SimpleTy <= MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE) ||
345 (SimpleTy >= MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE &&
346 SimpleTy <= MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE));
347 }
348
349 /// Return true if this is an integer or a vector integer type.
350 bool isInteger() const {
351 return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
352 SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
353 (SimpleTy >= MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE &&
354 SimpleTy <= MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE) ||
355 (SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE &&
356 SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE));
357 }
358
359 /// Return true if this is an integer, not including vectors.
360 bool isScalarInteger() const {
361 return (SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
362 SimpleTy <= MVT::LAST_INTEGER_VALUETYPE);
363 }
364
365 /// Return true if this is a vector value type.
366 bool isVector() const {
367 return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
        26: Assuming field 'SimpleTy' is >= FIRST_VECTOR_VALUETYPE
        28: Returning the value 1, which participates in a condition later
368              SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
        27: Assuming field 'SimpleTy' is <= LAST_VECTOR_VALUETYPE
369 }
370
371 /// Return true if this is a vector value type where the
372 /// runtime length is machine dependent
373 bool isScalableVector() const {
374 return (SimpleTy >= MVT::FIRST_SCALABLE_VECTOR_VALUETYPE &&
375 SimpleTy <= MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
376 }
377
378 bool isFixedLengthVector() const {
379 return (SimpleTy >= MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE &&
380 SimpleTy <= MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
381 }
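
For illustration only (not part of the analyzed source; the function name checkMVTClassification is hypothetical), a minimal sketch of how the predicates above partition simple value types:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper exercising the classification predicates defined above.
    static void checkMVTClassification() {
      using llvm::MVT;
      assert(MVT(MVT::v4f32).isVector() && MVT(MVT::v4f32).isFixedLengthVector());
      assert(MVT(MVT::nxv2i64).isVector() && MVT(MVT::nxv2i64).isScalableVector());
      assert(MVT(MVT::i32).isScalarInteger() && !MVT(MVT::i32).isVector());
      assert(MVT(MVT::f64).isFloatingPoint() && MVT(MVT::f64).isValid());
    }
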
382
383 /// Return true if this is a 16-bit vector type.
384 bool is16BitVector() const {
385 return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
386 SimpleTy == MVT::v16i1 || SimpleTy == MVT::v1f16);
387 }
388
389 /// Return true if this is a 32-bit vector type.
390 bool is32BitVector() const {
391 return (SimpleTy == MVT::v32i1 || SimpleTy == MVT::v4i8 ||
392 SimpleTy == MVT::v2i16 || SimpleTy == MVT::v1i32 ||
393 SimpleTy == MVT::v2f16 || SimpleTy == MVT::v2bf16 ||
394 SimpleTy == MVT::v1f32);
395 }
396
397 /// Return true if this is a 64-bit vector type.
398 bool is64BitVector() const {
399 return (SimpleTy == MVT::v64i1 || SimpleTy == MVT::v8i8 ||
400 SimpleTy == MVT::v4i16 || SimpleTy == MVT::v2i32 ||
401 SimpleTy == MVT::v1i64 || SimpleTy == MVT::v4f16 ||
402 SimpleTy == MVT::v4bf16 || SimpleTy == MVT::v2f32 ||
403 SimpleTy == MVT::v1f64);
404 }
405
406 /// Return true if this is a 128-bit vector type.
407 bool is128BitVector() const {
408 return (SimpleTy == MVT::v128i1 || SimpleTy == MVT::v16i8 ||
409 SimpleTy == MVT::v8i16 || SimpleTy == MVT::v4i32 ||
410 SimpleTy == MVT::v2i64 || SimpleTy == MVT::v1i128 ||
411 SimpleTy == MVT::v8f16 || SimpleTy == MVT::v8bf16 ||
412 SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
413 }
414
415 /// Return true if this is a 256-bit vector type.
416 bool is256BitVector() const {
417 return (SimpleTy == MVT::v16f16 || SimpleTy == MVT::v16bf16 ||
418 SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
419 SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
420 SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64 ||
421 SimpleTy == MVT::v256i1);
422 }
423
424 /// Return true if this is a 512-bit vector type.
425 bool is512BitVector() const {
426 return (SimpleTy == MVT::v32f16 || SimpleTy == MVT::v32bf16 ||
427 SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 ||
428 SimpleTy == MVT::v512i1 || SimpleTy == MVT::v64i8 ||
429 SimpleTy == MVT::v32i16 || SimpleTy == MVT::v16i32 ||
430 SimpleTy == MVT::v8i64);
431 }
432
433 /// Return true if this is a 1024-bit vector type.
434 bool is1024BitVector() const {
435 return (SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 ||
436 SimpleTy == MVT::v64i16 || SimpleTy == MVT::v32i32 ||
437 SimpleTy == MVT::v16i64 || SimpleTy == MVT::v64f16 ||
438 SimpleTy == MVT::v32f32 || SimpleTy == MVT::v16f64 ||
439 SimpleTy == MVT::v64bf16);
440 }
441
442 /// Return true if this is a 2048-bit vector type.
443 bool is2048BitVector() const {
444 return (SimpleTy == MVT::v256i8 || SimpleTy == MVT::v128i16 ||
445 SimpleTy == MVT::v64i32 || SimpleTy == MVT::v32i64 ||
446 SimpleTy == MVT::v128f16 || SimpleTy == MVT::v64f32 ||
447 SimpleTy == MVT::v32f64 || SimpleTy == MVT::v128bf16);
448 }
449
450 /// Return true if this is an overloaded type for TableGen.
451 bool isOverloaded() const {
452 return (SimpleTy == MVT::Any || SimpleTy == MVT::iAny ||
453 SimpleTy == MVT::fAny || SimpleTy == MVT::vAny ||
454 SimpleTy == MVT::iPTRAny);
455 }
456
457 /// Return a vector with the same number of elements as this vector, but
458 /// with the element type converted to an integer type with the same
459 /// bitwidth.
460 MVT changeVectorElementTypeToInteger() const {
461 MVT EltTy = getVectorElementType();
462 MVT IntTy = MVT::getIntegerVT(EltTy.getSizeInBits());
463 MVT VecTy = MVT::getVectorVT(IntTy, getVectorElementCount());
464     assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
465            "Simple vector VT not representable by simple integer vector VT!");
466 return VecTy;
467 }
468
469 /// Return a VT for a vector type whose attributes match ourselves
470 /// with the exception of the element type that is chosen by the caller.
471 MVT changeVectorElementType(MVT EltVT) const {
472 MVT VecTy = MVT::getVectorVT(EltVT, getVectorElementCount());
473     assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
474            "Simple vector VT not representable by simple integer vector VT!");
475 return VecTy;
476 }
477
478 /// Return the type converted to an equivalently sized integer or vector
479 /// with integer element type. Similar to changeVectorElementTypeToInteger,
480 /// but also handles scalars.
481 MVT changeTypeToInteger() {
482 if (isVector())
483 return changeVectorElementTypeToInteger();
484 return MVT::getIntegerVT(getSizeInBits());
485 }
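
For illustration only (not part of the analyzed source; checkIntegerConversions is a hypothetical name), a sketch of the element-type conversion helpers above:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper showing the expected results of the conversion helpers.
    static void checkIntegerConversions() {
      using llvm::MVT;
      // Same element count, element type replaced by an equal-width integer.
      assert(MVT(MVT::v4f32).changeVectorElementTypeToInteger() == MVT::v4i32);
      // Element type replaced by a caller-chosen type, element count preserved.
      assert(MVT(MVT::v8f16).changeVectorElementType(MVT::bf16) == MVT::v8bf16);
      // Scalars are handled too: the whole type becomes an equal-width integer.
      assert(MVT(MVT::f64).changeTypeToInteger() == MVT::i64);
    }
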
486
487 /// Return a VT for a vector type with the same element type but
488 /// half the number of elements.
489 MVT getHalfNumVectorElementsVT() const {
490 MVT EltVT = getVectorElementType();
491 auto EltCnt = getVectorElementCount();
492     assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
493 return getVectorVT(EltVT, EltCnt.divideCoefficientBy(2));
494 }
495
496 /// Returns true if the given vector is a power of 2.
497 bool isPow2VectorType() const {
498 unsigned NElts = getVectorMinNumElements();
499 return !(NElts & (NElts - 1));
500 }
501
502 /// Widens the length of the given vector MVT up to the nearest power of 2
503 /// and returns that type.
504 MVT getPow2VectorType() const {
505 if (isPow2VectorType())
506 return *this;
507
508 ElementCount NElts = getVectorElementCount();
509 unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
510 NElts = ElementCount::get(NewMinCount, NElts.isScalable());
511 return MVT::getVectorVT(getVectorElementType(), NElts);
512 }
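
For illustration only (not part of the analyzed source; checkPow2Widening is a hypothetical name), a sketch of the power-of-two helpers above:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper: widening a non-power-of-two vector type.
    static void checkPow2Widening() {
      using llvm::MVT;
      assert(!MVT(MVT::v3i32).isPow2VectorType());
      // Three elements round up to the next power of two, four.
      assert(MVT(MVT::v3i32).getPow2VectorType() == MVT::v4i32);
      // Already a power of two: returned unchanged.
      assert(MVT(MVT::v8f16).getPow2VectorType() == MVT::v8f16);
    }
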
513
514 /// If this is a vector, return the element type, otherwise return this.
515 MVT getScalarType() const {
516 return isVector() ? getVectorElementType() : *this;
517 }
518
519 MVT getVectorElementType() const {
520 switch (SimpleTy) {
521 default:
522 llvm_unreachable("Not a vector MVT!")::llvm::llvm_unreachable_internal("Not a vector MVT!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/Support/MachineValueType.h"
, 522)
;
523 case v1i1:
524 case v2i1:
525 case v4i1:
526 case v8i1:
527 case v16i1:
528 case v32i1:
529 case v64i1:
530 case v128i1:
531 case v256i1:
532 case v512i1:
533 case v1024i1:
534 case nxv1i1:
535 case nxv2i1:
536 case nxv4i1:
537 case nxv8i1:
538 case nxv16i1:
539 case nxv32i1:
540 case nxv64i1: return i1;
541 case v1i8:
542 case v2i8:
543 case v4i8:
544 case v8i8:
545 case v16i8:
546 case v32i8:
547 case v64i8:
548 case v128i8:
549 case v256i8:
550 case v512i8:
551 case v1024i8:
552 case nxv1i8:
553 case nxv2i8:
554 case nxv4i8:
555 case nxv8i8:
556 case nxv16i8:
557 case nxv32i8:
558 case nxv64i8: return i8;
559 case v1i16:
560 case v2i16:
561 case v3i16:
562 case v4i16:
563 case v8i16:
564 case v16i16:
565 case v32i16:
566 case v64i16:
567 case v128i16:
568 case v256i16:
569 case v512i16:
570 case nxv1i16:
571 case nxv2i16:
572 case nxv4i16:
573 case nxv8i16:
574 case nxv16i16:
575 case nxv32i16: return i16;
576 case v1i32:
577 case v2i32:
578 case v3i32:
579 case v4i32:
580 case v5i32:
581 case v6i32:
582 case v7i32:
583 case v8i32:
584 case v16i32:
585 case v32i32:
586 case v64i32:
587 case v128i32:
588 case v256i32:
589 case v512i32:
590 case v1024i32:
591 case v2048i32:
592 case nxv1i32:
593 case nxv2i32:
594 case nxv4i32:
595 case nxv8i32:
596 case nxv16i32:
597 case nxv32i32: return i32;
598 case v1i64:
599 case v2i64:
600 case v3i64:
601 case v4i64:
602 case v8i64:
603 case v16i64:
604 case v32i64:
605 case v64i64:
606 case v128i64:
607 case v256i64:
608 case nxv1i64:
609 case nxv2i64:
610 case nxv4i64:
611 case nxv8i64:
612 case nxv16i64:
613 case nxv32i64: return i64;
614 case v1i128: return i128;
615 case v1f16:
616 case v2f16:
617 case v3f16:
618 case v4f16:
619 case v8f16:
620 case v16f16:
621 case v32f16:
622 case v64f16:
623 case v128f16:
624 case v256f16:
625 case v512f16:
626 case nxv1f16:
627 case nxv2f16:
628 case nxv4f16:
629 case nxv8f16:
630 case nxv16f16:
631 case nxv32f16: return f16;
632 case v2bf16:
633 case v3bf16:
634 case v4bf16:
635 case v8bf16:
636 case v16bf16:
637 case v32bf16:
638 case v64bf16:
639 case v128bf16:
640 case nxv1bf16:
641 case nxv2bf16:
642 case nxv4bf16:
643 case nxv8bf16: return bf16;
644 case v1f32:
645 case v2f32:
646 case v3f32:
647 case v4f32:
648 case v5f32:
649 case v6f32:
650 case v7f32:
651 case v8f32:
652 case v16f32:
653 case v32f32:
654 case v64f32:
655 case v128f32:
656 case v256f32:
657 case v512f32:
658 case v1024f32:
659 case v2048f32:
660 case nxv1f32:
661 case nxv2f32:
662 case nxv4f32:
663 case nxv8f32:
664 case nxv16f32: return f32;
665 case v1f64:
666 case v2f64:
667 case v3f64:
668 case v4f64:
669 case v8f64:
670 case v16f64:
671 case v32f64:
672 case v64f64:
673 case v128f64:
674 case v256f64:
675 case nxv1f64:
676 case nxv2f64:
677 case nxv4f64:
678 case nxv8f64: return f64;
679 }
680 }
681
682 /// Given a vector type, return the minimum number of elements it contains.
683 unsigned getVectorMinNumElements() const {
684 switch (SimpleTy) {
685 default:
686       llvm_unreachable("Not a vector MVT!");
687 case v2048i32:
688 case v2048f32: return 2048;
689 case v1024i1:
690 case v1024i8:
691 case v1024i32:
692 case v1024f32: return 1024;
693 case v512i1:
694 case v512i8:
695 case v512i16:
696 case v512i32:
697 case v512f16:
698 case v512f32: return 512;
699 case v256i1:
700 case v256i8:
701 case v256i16:
702 case v256f16:
703 case v256i32:
704 case v256i64:
705 case v256f32:
706 case v256f64: return 256;
707 case v128i1:
708 case v128i8:
709 case v128i16:
710 case v128i32:
711 case v128i64:
712 case v128f16:
713 case v128bf16:
714 case v128f32:
715 case v128f64: return 128;
716 case v64i1:
717 case v64i8:
718 case v64i16:
719 case v64i32:
720 case v64i64:
721 case v64f16:
722 case v64bf16:
723 case v64f32:
724 case v64f64:
725 case nxv64i1:
726 case nxv64i8: return 64;
727 case v32i1:
728 case v32i8:
729 case v32i16:
730 case v32i32:
731 case v32i64:
732 case v32f16:
733 case v32bf16:
734 case v32f32:
735 case v32f64:
736 case nxv32i1:
737 case nxv32i8:
738 case nxv32i16:
739 case nxv32i32:
740 case nxv32i64:
741 case nxv32f16: return 32;
742 case v16i1:
743 case v16i8:
744 case v16i16:
745 case v16i32:
746 case v16i64:
747 case v16f16:
748 case v16bf16:
749 case v16f32:
750 case v16f64:
751 case nxv16i1:
752 case nxv16i8:
753 case nxv16i16:
754 case nxv16i32:
755 case nxv16i64:
756 case nxv16f16:
757 case nxv16f32: return 16;
758 case v8i1:
759 case v8i8:
760 case v8i16:
761 case v8i32:
762 case v8i64:
763 case v8f16:
764 case v8bf16:
765 case v8f32:
766 case v8f64:
767 case nxv8i1:
768 case nxv8i8:
769 case nxv8i16:
770 case nxv8i32:
771 case nxv8i64:
772 case nxv8f16:
773 case nxv8bf16:
774 case nxv8f32:
775 case nxv8f64: return 8;
776 case v7i32:
777 case v7f32: return 7;
778 case v6i32:
779 case v6f32: return 6;
780 case v5i32:
781 case v5f32: return 5;
782 case v4i1:
783 case v4i8:
784 case v4i16:
785 case v4i32:
786 case v4i64:
787 case v4f16:
788 case v4bf16:
789 case v4f32:
790 case v4f64:
791 case nxv4i1:
792 case nxv4i8:
793 case nxv4i16:
794 case nxv4i32:
795 case nxv4i64:
796 case nxv4f16:
797 case nxv4bf16:
798 case nxv4f32:
799 case nxv4f64: return 4;
800 case v3i16:
801 case v3i32:
802 case v3i64:
803 case v3f16:
804 case v3bf16:
805 case v3f32:
806 case v3f64: return 3;
807 case v2i1:
808 case v2i8:
809 case v2i16:
810 case v2i32:
811 case v2i64:
812 case v2f16:
813 case v2bf16:
814 case v2f32:
815 case v2f64:
816 case nxv2i1:
817 case nxv2i8:
818 case nxv2i16:
819 case nxv2i32:
820 case nxv2i64:
821 case nxv2f16:
822 case nxv2bf16:
823 case nxv2f32:
824 case nxv2f64: return 2;
825 case v1i1:
826 case v1i8:
827 case v1i16:
828 case v1i32:
829 case v1i64:
830 case v1i128:
831 case v1f16:
832 case v1f32:
833 case v1f64:
834 case nxv1i1:
835 case nxv1i8:
836 case nxv1i16:
837 case nxv1i32:
838 case nxv1i64:
839 case nxv1f16:
840 case nxv1bf16:
841 case nxv1f32:
842 case nxv1f64: return 1;
843 }
844 }
845
846 ElementCount getVectorElementCount() const {
847 return ElementCount::get(getVectorMinNumElements(), isScalableVector());
848 }
849
850 unsigned getVectorNumElements() const {
851 // TODO: Check that this isn't a scalable vector.
852 return getVectorMinNumElements();
853 }
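
For illustration only (not part of the analyzed source; checkElementQueries is a hypothetical name), a sketch of the element queries above on fixed and scalable vectors:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper exercising the element queries defined above.
    static void checkElementQueries() {
      using llvm::MVT;
      assert(MVT(MVT::v8i16).getVectorElementType() == MVT::i16);
      assert(MVT(MVT::v8i16).getVectorNumElements() == 8);
      // For scalable types only the minimum element count is known statically.
      llvm::ElementCount EC = MVT(MVT::nxv4f32).getVectorElementCount();
      assert(EC.isScalable() && EC.getKnownMinValue() == 4);
      assert(MVT(MVT::nxv4f32).getScalarType() == MVT::f32);
    }
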
854
855 /// Returns the size of the specified MVT in bits.
856 ///
857 /// If the value type is a scalable vector type, the scalable property will
858 /// be set and the runtime size will be a positive integer multiple of the
859 /// base size.
860 TypeSize getSizeInBits() const {
861 switch (SimpleTy) {
862 default:
863       llvm_unreachable("getSizeInBits called on extended MVT.");
864 case Other:
865       llvm_unreachable("Value type is non-standard value, Other.");
866 case iPTR:
867       llvm_unreachable("Value type size is target-dependent. Ask TLI.");
868 case iPTRAny:
869 case iAny:
870 case fAny:
871 case vAny:
872 case Any:
873       llvm_unreachable("Value type is overloaded.");
874 case token:
875       llvm_unreachable("Token type is a sentinel that cannot be used "
876                        "in codegen and has no size");
877 case Metadata:
878       llvm_unreachable("Value type is metadata.");
879 case i1:
880 case v1i1: return TypeSize::Fixed(1);
881 case nxv1i1: return TypeSize::Scalable(1);
882 case v2i1: return TypeSize::Fixed(2);
883 case nxv2i1: return TypeSize::Scalable(2);
884 case v4i1: return TypeSize::Fixed(4);
885 case nxv4i1: return TypeSize::Scalable(4);
886 case i8 :
887 case v1i8:
888 case v8i1: return TypeSize::Fixed(8);
889 case nxv1i8:
890 case nxv8i1: return TypeSize::Scalable(8);
891 case i16 :
892 case f16:
893 case bf16:
894 case v16i1:
895 case v2i8:
896 case v1i16:
897 case v1f16: return TypeSize::Fixed(16);
898 case nxv16i1:
899 case nxv2i8:
900 case nxv1i16:
901 case nxv1bf16:
902 case nxv1f16: return TypeSize::Scalable(16);
903 case f32 :
904 case i32 :
905 case v32i1:
906 case v4i8:
907 case v2i16:
908 case v2f16:
909 case v2bf16:
910 case v1f32:
911 case v1i32: return TypeSize::Fixed(32);
912 case nxv32i1:
913 case nxv4i8:
914 case nxv2i16:
915 case nxv1i32:
916 case nxv2f16:
917 case nxv2bf16:
918 case nxv1f32: return TypeSize::Scalable(32);
919 case v3i16:
920 case v3f16:
921 case v3bf16: return TypeSize::Fixed(48);
922 case x86mmx:
923 case f64 :
924 case i64 :
925 case v64i1:
926 case v8i8:
927 case v4i16:
928 case v2i32:
929 case v1i64:
930 case v4f16:
931 case v4bf16:
932 case v2f32:
933 case v1f64: return TypeSize::Fixed(64);
934 case nxv64i1:
935 case nxv8i8:
936 case nxv4i16:
937 case nxv2i32:
938 case nxv1i64:
939 case nxv4f16:
940 case nxv4bf16:
941 case nxv2f32:
942 case nxv1f64: return TypeSize::Scalable(64);
943 case f80 : return TypeSize::Fixed(80);
944 case v3i32:
945 case v3f32: return TypeSize::Fixed(96);
946 case f128:
947 case ppcf128:
948 case i128:
949 case v128i1:
950 case v16i8:
951 case v8i16:
952 case v4i32:
953 case v2i64:
954 case v1i128:
955 case v8f16:
956 case v8bf16:
957 case v4f32:
958 case v2f64: return TypeSize::Fixed(128);
959 case nxv16i8:
960 case nxv8i16:
961 case nxv4i32:
962 case nxv2i64:
963 case nxv8f16:
964 case nxv8bf16:
965 case nxv4f32:
966 case nxv2f64: return TypeSize::Scalable(128);
967 case v5i32:
968 case v5f32: return TypeSize::Fixed(160);
969 case v6i32:
970 case v3i64:
971 case v6f32:
972 case v3f64: return TypeSize::Fixed(192);
973 case v7i32:
974 case v7f32: return TypeSize::Fixed(224);
975 case v256i1:
976 case v32i8:
977 case v16i16:
978 case v8i32:
979 case v4i64:
980 case v16f16:
981 case v16bf16:
982 case v8f32:
983 case v4f64: return TypeSize::Fixed(256);
984 case nxv32i8:
985 case nxv16i16:
986 case nxv8i32:
987 case nxv4i64:
988 case nxv16f16:
989 case nxv8f32:
990 case nxv4f64: return TypeSize::Scalable(256);
991 case i64x8:
992 case v512i1:
993 case v64i8:
994 case v32i16:
995 case v16i32:
996 case v8i64:
997 case v32f16:
998 case v32bf16:
999 case v16f32:
1000 case v8f64: return TypeSize::Fixed(512);
1001 case nxv64i8:
1002 case nxv32i16:
1003 case nxv16i32:
1004 case nxv8i64:
1005 case nxv32f16:
1006 case nxv16f32:
1007 case nxv8f64: return TypeSize::Scalable(512);
1008 case v1024i1:
1009 case v128i8:
1010 case v64i16:
1011 case v32i32:
1012 case v16i64:
1013 case v64f16:
1014 case v64bf16:
1015 case v32f32:
1016 case v16f64: return TypeSize::Fixed(1024);
1017 case nxv32i32:
1018 case nxv16i64: return TypeSize::Scalable(1024);
1019 case v256i8:
1020 case v128i16:
1021 case v64i32:
1022 case v32i64:
1023 case v128f16:
1024 case v128bf16:
1025 case v64f32:
1026 case v32f64: return TypeSize::Fixed(2048);
1027 case nxv32i64: return TypeSize::Scalable(2048);
1028 case v512i8:
1029 case v256i16:
1030 case v128i32:
1031 case v64i64:
1032 case v256f16:
1033 case v128f32:
1034 case v64f64: return TypeSize::Fixed(4096);
1035 case v1024i8:
1036 case v512i16:
1037 case v256i32:
1038 case v128i64:
1039 case v512f16:
1040 case v256f32:
1041 case x86amx:
1042 case v128f64: return TypeSize::Fixed(8192);
1043 case v512i32:
1044 case v256i64:
1045 case v512f32:
1046 case v256f64: return TypeSize::Fixed(16384);
1047 case v1024i32:
1048 case v1024f32: return TypeSize::Fixed(32768);
1049 case v2048i32:
1050 case v2048f32: return TypeSize::Fixed(65536);
1051 case funcref:
1052 case externref: return TypeSize::Fixed(0); // opaque type
1053 }
1054 }
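
For illustration only (not part of the analyzed source; checkSizeQueries is a hypothetical name), a sketch of the size queries above; getSizeInBits returns a TypeSize, which carries a scalable flag for nxv types:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper exercising the size queries defined above.
    static void checkSizeQueries() {
      using llvm::MVT;
      assert(MVT(MVT::v4i32).getFixedSizeInBits() == 128);
      llvm::TypeSize TS = MVT(MVT::nxv4i32).getSizeInBits();
      // Known minimum of 128 bits, scaled by a runtime factor.
      assert(TS.isScalable() && TS.getKnownMinSize() == 128);
      assert(MVT(MVT::v4i32).getScalarSizeInBits() == 32);
    }
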
1055
1056 /// Return the size of the specified fixed width value type in bits. The
1057 /// function will assert if the type is scalable.
1058 uint64_t getFixedSizeInBits() const {
1059 return getSizeInBits().getFixedSize();
1060 }
1061
1062 uint64_t getScalarSizeInBits() const {
1063 return getScalarType().getSizeInBits().getFixedSize();
1064 }
1065
1066 /// Return the number of bytes overwritten by a store of the specified value
1067 /// type.
1068 ///
1069 /// If the value type is a scalable vector type, the scalable property will
1070 /// be set and the runtime size will be a positive integer multiple of the
1071 /// base size.
1072 TypeSize getStoreSize() const {
1073 TypeSize BaseSize = getSizeInBits();
1074 return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
1075 }
1076
1077 /// Return the number of bits overwritten by a store of the specified value
1078 /// type.
1079 ///
1080 /// If the value type is a scalable vector type, the scalable property will
1081 /// be set and the runtime size will be a positive integer multiple of the
1082 /// base size.
1083 TypeSize getStoreSizeInBits() const {
1084 return getStoreSize() * 8;
1085 }
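
For illustration only (not part of the analyzed source): getStoreSize rounds the bit size up to whole bytes, (BaseSize + 7) / 8, so MVT::i1 stores one byte and MVT::f80 stores ten bytes, and getStoreSizeInBits reports 8 and 80 bits respectively; the scalable flag of the original TypeSize is carried through unchanged.
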
1086
1087 /// Returns true if the number of bits for the type is a multiple of an
1088 /// 8-bit byte.
1089 bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
1090
1091 /// Return true if we know at compile time this has more bits than VT.
1092 bool knownBitsGT(MVT VT) const {
1093 return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
1094 }
1095
1096 /// Return true if we know at compile time this has more than or the same
1097 /// bits as VT.
1098 bool knownBitsGE(MVT VT) const {
1099 return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
1100 }
1101
1102 /// Return true if we know at compile time this has fewer bits than VT.
1103 bool knownBitsLT(MVT VT) const {
1104 return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
1105 }
1106
1107 /// Return true if we know at compile time this has fewer than or the same
1108 /// bits as VT.
1109 bool knownBitsLE(MVT VT) const {
1110 return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
1111 }
1112
1113 /// Return true if this has more bits than VT.
1114 bool bitsGT(MVT VT) const {
1115     assert(isScalableVector() == VT.isScalableVector() &&
1116            "Comparison between scalable and fixed types");
1117 return knownBitsGT(VT);
1118 }
1119
1120 /// Return true if this has no less bits than VT.
1121 bool bitsGE(MVT VT) const {
1122     assert(isScalableVector() == VT.isScalableVector() &&
1123            "Comparison between scalable and fixed types");
1124 return knownBitsGE(VT);
1125 }
1126
1127 /// Return true if this has less bits than VT.
1128 bool bitsLT(MVT VT) const {
1129     assert(isScalableVector() == VT.isScalableVector() &&
1130            "Comparison between scalable and fixed types");
1131 return knownBitsLT(VT);
1132 }
1133
1134 /// Return true if this has no more bits than VT.
1135 bool bitsLE(MVT VT) const {
1136     assert(isScalableVector() == VT.isScalableVector() &&
1137            "Comparison between scalable and fixed types");
1138 return knownBitsLE(VT);
1139 }
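
For illustration only (not part of the analyzed source; checkBitComparisons is a hypothetical name), a sketch of the comparisons above; the knownBits variants work across fixed and scalable types, while bitsGT/bitsGE/bitsLT/bitsLE assert that both sides agree on scalability:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper exercising the bit-width comparisons defined above.
    static void checkBitComparisons() {
      using llvm::MVT;
      assert(MVT(MVT::i64).bitsGT(MVT::i32));
      assert(MVT(MVT::v2f64).bitsLE(MVT::v4f32));          // 128 bits <= 128 bits
      assert(MVT(MVT::nxv4i32).knownBitsGE(MVT::nxv2i32)); // 128 x n >= 64 x n
    }
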
1140
1141 static MVT getFloatingPointVT(unsigned BitWidth) {
1142 switch (BitWidth) {
1143 default:
1144      llvm_unreachable("Bad bit width!");
1145 case 16:
1146 return MVT::f16;
1147 case 32:
1148 return MVT::f32;
1149 case 64:
1150 return MVT::f64;
1151 case 80:
1152 return MVT::f80;
1153 case 128:
1154 return MVT::f128;
1155 }
1156 }
1157
1158 static MVT getIntegerVT(unsigned BitWidth) {
1159 switch (BitWidth) {
1160 default:
1161 return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
1162 case 1:
1163 return MVT::i1;
1164 case 8:
1165 return MVT::i8;
1166 case 16:
1167 return MVT::i16;
1168 case 32:
1169 return MVT::i32;
1170 case 64:
1171 return MVT::i64;
1172 case 128:
1173 return MVT::i128;
1174 }
1175 }
1176
1177 static MVT getVectorVT(MVT VT, unsigned NumElements) {
1178 switch (VT.SimpleTy) {
1179 default:
1180 break;
1181 case MVT::i1:
1182 if (NumElements == 1) return MVT::v1i1;
1183 if (NumElements == 2) return MVT::v2i1;
1184 if (NumElements == 4) return MVT::v4i1;
1185 if (NumElements == 8) return MVT::v8i1;
1186 if (NumElements == 16) return MVT::v16i1;
1187 if (NumElements == 32) return MVT::v32i1;
1188 if (NumElements == 64) return MVT::v64i1;
1189 if (NumElements == 128) return MVT::v128i1;
1190 if (NumElements == 256) return MVT::v256i1;
1191 if (NumElements == 512) return MVT::v512i1;
1192 if (NumElements == 1024) return MVT::v1024i1;
1193 break;
1194 case MVT::i8:
1195 if (NumElements == 1) return MVT::v1i8;
1196 if (NumElements == 2) return MVT::v2i8;
1197 if (NumElements == 4) return MVT::v4i8;
1198 if (NumElements == 8) return MVT::v8i8;
1199 if (NumElements == 16) return MVT::v16i8;
1200 if (NumElements == 32) return MVT::v32i8;
1201 if (NumElements == 64) return MVT::v64i8;
1202 if (NumElements == 128) return MVT::v128i8;
1203 if (NumElements == 256) return MVT::v256i8;
1204 if (NumElements == 512) return MVT::v512i8;
1205 if (NumElements == 1024) return MVT::v1024i8;
1206 break;
1207 case MVT::i16:
1208 if (NumElements == 1) return MVT::v1i16;
1209 if (NumElements == 2) return MVT::v2i16;
1210 if (NumElements == 3) return MVT::v3i16;
1211 if (NumElements == 4) return MVT::v4i16;
1212 if (NumElements == 8) return MVT::v8i16;
1213 if (NumElements == 16) return MVT::v16i16;
1214 if (NumElements == 32) return MVT::v32i16;
1215 if (NumElements == 64) return MVT::v64i16;
1216 if (NumElements == 128) return MVT::v128i16;
1217 if (NumElements == 256) return MVT::v256i16;
1218 if (NumElements == 512) return MVT::v512i16;
1219 break;
1220 case MVT::i32:
1221 if (NumElements == 1) return MVT::v1i32;
1222 if (NumElements == 2) return MVT::v2i32;
1223 if (NumElements == 3) return MVT::v3i32;
1224 if (NumElements == 4) return MVT::v4i32;
1225 if (NumElements == 5) return MVT::v5i32;
1226 if (NumElements == 6) return MVT::v6i32;
1227 if (NumElements == 7) return MVT::v7i32;
1228 if (NumElements == 8) return MVT::v8i32;
1229 if (NumElements == 16) return MVT::v16i32;
1230 if (NumElements == 32) return MVT::v32i32;
1231 if (NumElements == 64) return MVT::v64i32;
1232 if (NumElements == 128) return MVT::v128i32;
1233 if (NumElements == 256) return MVT::v256i32;
1234 if (NumElements == 512) return MVT::v512i32;
1235 if (NumElements == 1024) return MVT::v1024i32;
1236 if (NumElements == 2048) return MVT::v2048i32;
1237 break;
1238 case MVT::i64:
1239 if (NumElements == 1) return MVT::v1i64;
1240 if (NumElements == 2) return MVT::v2i64;
1241 if (NumElements == 3) return MVT::v3i64;
1242 if (NumElements == 4) return MVT::v4i64;
1243 if (NumElements == 8) return MVT::v8i64;
1244 if (NumElements == 16) return MVT::v16i64;
1245 if (NumElements == 32) return MVT::v32i64;
1246 if (NumElements == 64) return MVT::v64i64;
1247 if (NumElements == 128) return MVT::v128i64;
1248 if (NumElements == 256) return MVT::v256i64;
1249 break;
1250 case MVT::i128:
1251 if (NumElements == 1) return MVT::v1i128;
1252 break;
1253 case MVT::f16:
1254 if (NumElements == 1) return MVT::v1f16;
1255 if (NumElements == 2) return MVT::v2f16;
1256 if (NumElements == 3) return MVT::v3f16;
1257 if (NumElements == 4) return MVT::v4f16;
1258 if (NumElements == 8) return MVT::v8f16;
1259 if (NumElements == 16) return MVT::v16f16;
1260 if (NumElements == 32) return MVT::v32f16;
1261 if (NumElements == 64) return MVT::v64f16;
1262 if (NumElements == 128) return MVT::v128f16;
1263 if (NumElements == 256) return MVT::v256f16;
1264 if (NumElements == 512) return MVT::v512f16;
1265 break;
1266 case MVT::bf16:
1267 if (NumElements == 2) return MVT::v2bf16;
1268 if (NumElements == 3) return MVT::v3bf16;
1269 if (NumElements == 4) return MVT::v4bf16;
1270 if (NumElements == 8) return MVT::v8bf16;
1271 if (NumElements == 16) return MVT::v16bf16;
1272 if (NumElements == 32) return MVT::v32bf16;
1273 if (NumElements == 64) return MVT::v64bf16;
1274 if (NumElements == 128) return MVT::v128bf16;
1275 break;
1276 case MVT::f32:
1277 if (NumElements == 1) return MVT::v1f32;
1278 if (NumElements == 2) return MVT::v2f32;
1279 if (NumElements == 3) return MVT::v3f32;
1280 if (NumElements == 4) return MVT::v4f32;
1281 if (NumElements == 5) return MVT::v5f32;
1282 if (NumElements == 6) return MVT::v6f32;
1283 if (NumElements == 7) return MVT::v7f32;
1284 if (NumElements == 8) return MVT::v8f32;
1285 if (NumElements == 16) return MVT::v16f32;
1286 if (NumElements == 32) return MVT::v32f32;
1287 if (NumElements == 64) return MVT::v64f32;
1288 if (NumElements == 128) return MVT::v128f32;
1289 if (NumElements == 256) return MVT::v256f32;
1290 if (NumElements == 512) return MVT::v512f32;
1291 if (NumElements == 1024) return MVT::v1024f32;
1292 if (NumElements == 2048) return MVT::v2048f32;
1293 break;
1294 case MVT::f64:
1295 if (NumElements == 1) return MVT::v1f64;
1296 if (NumElements == 2) return MVT::v2f64;
1297 if (NumElements == 3) return MVT::v3f64;
1298 if (NumElements == 4) return MVT::v4f64;
1299 if (NumElements == 8) return MVT::v8f64;
1300 if (NumElements == 16) return MVT::v16f64;
1301 if (NumElements == 32) return MVT::v32f64;
1302 if (NumElements == 64) return MVT::v64f64;
1303 if (NumElements == 128) return MVT::v128f64;
1304 if (NumElements == 256) return MVT::v256f64;
1305 break;
1306 }
1307 return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
1308 }
1309
1310 static MVT getScalableVectorVT(MVT VT, unsigned NumElements) {
1311 switch(VT.SimpleTy) {
1312 default:
1313 break;
1314 case MVT::i1:
1315 if (NumElements == 1) return MVT::nxv1i1;
1316 if (NumElements == 2) return MVT::nxv2i1;
1317 if (NumElements == 4) return MVT::nxv4i1;
1318 if (NumElements == 8) return MVT::nxv8i1;
1319 if (NumElements == 16) return MVT::nxv16i1;
1320 if (NumElements == 32) return MVT::nxv32i1;
1321 if (NumElements == 64) return MVT::nxv64i1;
1322 break;
1323 case MVT::i8:
1324 if (NumElements == 1) return MVT::nxv1i8;
1325 if (NumElements == 2) return MVT::nxv2i8;
1326 if (NumElements == 4) return MVT::nxv4i8;
1327 if (NumElements == 8) return MVT::nxv8i8;
1328 if (NumElements == 16) return MVT::nxv16i8;
1329 if (NumElements == 32) return MVT::nxv32i8;
1330 if (NumElements == 64) return MVT::nxv64i8;
1331 break;
1332 case MVT::i16:
1333 if (NumElements == 1) return MVT::nxv1i16;
1334 if (NumElements == 2) return MVT::nxv2i16;
1335 if (NumElements == 4) return MVT::nxv4i16;
1336 if (NumElements == 8) return MVT::nxv8i16;
1337 if (NumElements == 16) return MVT::nxv16i16;
1338 if (NumElements == 32) return MVT::nxv32i16;
1339 break;
1340 case MVT::i32:
1341 if (NumElements == 1) return MVT::nxv1i32;
1342 if (NumElements == 2) return MVT::nxv2i32;
1343 if (NumElements == 4) return MVT::nxv4i32;
1344 if (NumElements == 8) return MVT::nxv8i32;
1345 if (NumElements == 16) return MVT::nxv16i32;
1346 if (NumElements == 32) return MVT::nxv32i32;
1347 break;
1348 case MVT::i64:
1349 if (NumElements == 1) return MVT::nxv1i64;
1350 if (NumElements == 2) return MVT::nxv2i64;
1351 if (NumElements == 4) return MVT::nxv4i64;
1352 if (NumElements == 8) return MVT::nxv8i64;
1353 if (NumElements == 16) return MVT::nxv16i64;
1354 if (NumElements == 32) return MVT::nxv32i64;
1355 break;
1356 case MVT::f16:
1357 if (NumElements == 1) return MVT::nxv1f16;
1358 if (NumElements == 2) return MVT::nxv2f16;
1359 if (NumElements == 4) return MVT::nxv4f16;
1360 if (NumElements == 8) return MVT::nxv8f16;
1361 if (NumElements == 16) return MVT::nxv16f16;
1362 if (NumElements == 32) return MVT::nxv32f16;
1363 break;
1364 case MVT::bf16:
1365 if (NumElements == 1) return MVT::nxv1bf16;
1366 if (NumElements == 2) return MVT::nxv2bf16;
1367 if (NumElements == 4) return MVT::nxv4bf16;
1368 if (NumElements == 8) return MVT::nxv8bf16;
1369 break;
1370 case MVT::f32:
1371 if (NumElements == 1) return MVT::nxv1f32;
1372 if (NumElements == 2) return MVT::nxv2f32;
1373 if (NumElements == 4) return MVT::nxv4f32;
1374 if (NumElements == 8) return MVT::nxv8f32;
1375 if (NumElements == 16) return MVT::nxv16f32;
1376 break;
1377 case MVT::f64:
1378 if (NumElements == 1) return MVT::nxv1f64;
1379 if (NumElements == 2) return MVT::nxv2f64;
1380 if (NumElements == 4) return MVT::nxv4f64;
1381 if (NumElements == 8) return MVT::nxv8f64;
1382 break;
1383 }
1384 return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
1385 }
1386
1387 static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) {
1388 if (IsScalable)
1389 return getScalableVectorVT(VT, NumElements);
1390 return getVectorVT(VT, NumElements);
1391 }
1392
1393 static MVT getVectorVT(MVT VT, ElementCount EC) {
1394 if (EC.isScalable())
1395 return getScalableVectorVT(VT, EC.getKnownMinValue());
1396 return getVectorVT(VT, EC.getKnownMinValue());
1397 }
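
For illustration only (not part of the analyzed source; checkVectorFactories is a hypothetical name), a sketch of the factory functions above; combinations with no entry in the enumeration yield INVALID_SIMPLE_VALUE_TYPE:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper exercising the MVT factory functions defined above.
    static void checkVectorFactories() {
      using llvm::ElementCount;
      using llvm::MVT;
      assert(MVT::getIntegerVT(32) == MVT::i32);
      assert(MVT::getVectorVT(MVT::i32, 8) == MVT::v8i32);
      assert(MVT::getScalableVectorVT(MVT::f64, 2) == MVT::nxv2f64);
      assert(MVT::getVectorVT(MVT::f32, ElementCount::get(4, /*Scalable=*/true)) ==
             MVT::nxv4f32);
      // No v9i32 entry exists in the enumeration above.
      assert(MVT::getVectorVT(MVT::i32, 9) == MVT::INVALID_SIMPLE_VALUE_TYPE);
    }
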
1398
1399 /// Return the value type corresponding to the specified type. This returns
1400 /// all pointers as iPTR. If HandleUnknown is true, unknown types are
1401 /// returned as Other, otherwise they are invalid.
1402 static MVT getVT(Type *Ty, bool HandleUnknown = false);
1403
1404 public:
1405 /// SimpleValueType Iteration
1406 /// @{
1407 static auto all_valuetypes() {
1408 return seq_inclusive(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE);
1409 }
1410
1411 static auto integer_valuetypes() {
1412 return seq_inclusive(MVT::FIRST_INTEGER_VALUETYPE,
1413 MVT::LAST_INTEGER_VALUETYPE);
1414 }
1415
1416 static auto fp_valuetypes() {
1417 return seq_inclusive(MVT::FIRST_FP_VALUETYPE, MVT::LAST_FP_VALUETYPE);
1418 }
1419
1420 static auto vector_valuetypes() {
1421 return seq_inclusive(MVT::FIRST_VECTOR_VALUETYPE,
1422 MVT::LAST_VECTOR_VALUETYPE);
1423 }
1424
1425 static auto fixedlen_vector_valuetypes() {
1426 return seq_inclusive(MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE,
1427 MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
1428 }
1429
1430 static auto scalable_vector_valuetypes() {
1431 return seq_inclusive(MVT::FIRST_SCALABLE_VECTOR_VALUETYPE,
1432 MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
1433 }
1434
1435 static auto integer_fixedlen_vector_valuetypes() {
1436 return seq_inclusive(MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
1437 MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE);
1438 }
1439
1440 static auto fp_fixedlen_vector_valuetypes() {
1441 return seq_inclusive(MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE,
1442 MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE);
1443 }
1444
1445 static auto integer_scalable_vector_valuetypes() {
1446 return seq_inclusive(MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
1447 MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE);
1448 }
1449
1450 static auto fp_scalable_vector_valuetypes() {
1451 return seq_inclusive(MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE,
1452 MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE);
1453 }
1454 /// @}
1455 };
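
For illustration only (not part of the analyzed source; checkValueTypeIteration is a hypothetical name), a sketch of the iteration helpers above, which yield inclusive ranges of simple value types:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    // Hypothetical helper iterating one of the value-type ranges defined above.
    static void checkValueTypeIteration() {
      using llvm::MVT;
      unsigned ScalarInts = 0;
      for (MVT VT : MVT::integer_valuetypes()) {
        assert(VT.isScalarInteger());
        ++ScalarInts;
      }
      assert(ScalarInts == 6); // i1, i8, i16, i32, i64, i128
    }
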
1456
1457} // end namespace llvm
1458
1459#endif // LLVM_SUPPORT_MACHINEVALUETYPE_H