Bug Summary

File: lib/Target/PowerPC/PPCTargetTransformInfo.cpp
Warning: line 77, column 25
Called C++ object pointer is null
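
The path below shows how this happens: TargetTransformInfoImplCRTPBase::getUserCost() passes a null OpTy to getOperationCost() whenever the User does not have exactly one operand, and the IntToPtr case then calls OpTy->getScalarSizeInBits() unconditionally. A minimal standalone reduction of the defect follows; the guard shown is one possible hardening, not the upstream fix, and the types are simplified stand-ins:

#include <iostream>

struct Type {
  unsigned ScalarBits;
  unsigned getScalarSizeInBits() const { return ScalarBits; }
};

unsigned getOperationCost(bool IsIntToPtr, Type * /*Ty*/, Type *OpTy) {
  if (IsIntToPtr) {
    if (!OpTy)   // without this check, a null OpTy is dereferenced below
      return 1;  // fall back to TCC_Basic
    return OpTy->getScalarSizeInBits() <= 64 ? 0 : 1;
  }
  return 1;
}

int main() {
  Type PtrTy{64};
  // Mirrors the caller: OpTy is null when U->getNumOperands() != 1.
  std::cout << getOperationCost(true, &PtrTy, nullptr) << '\n'; // prints 1
}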

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name PPCTargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/PowerPC -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/PowerPC -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/PowerPC -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/PowerPC/PPCTargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/PowerPC/PPCTargetTransformInfo.cpp

1//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "PPCTargetTransformInfo.h"
11#include "llvm/Analysis/TargetTransformInfo.h"
12#include "llvm/CodeGen/BasicTTIImpl.h"
13#include "llvm/CodeGen/CostTable.h"
14#include "llvm/CodeGen/TargetLowering.h"
15#include "llvm/Support/CommandLine.h"
16#include "llvm/Support/Debug.h"
17using namespace llvm;
18
19#define DEBUG_TYPE "ppctti"
20
21static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
22cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
23
24// This is currently only used for the data prefetch pass which is only enabled
25// for BG/Q by default.
26static cl::opt<unsigned>
27CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
28 cl::desc("The loop prefetch cache line size"));
29
30static cl::opt<bool>
31EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
32 cl::desc("Enable using coldcc calling conv for cold "
33 "internal functions"));
34
35//===----------------------------------------------------------------------===//
36//
37// PPC cost model.
38//
39//===----------------------------------------------------------------------===//
40
41TargetTransformInfo::PopcntSupportKind
42PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
 43 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
44 if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
45 return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
46 TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
47 return TTI::PSK_Software;
48}
49
50int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
51 if (DisablePPCConstHoist)
52 return BaseT::getIntImmCost(Imm, Ty);
53
 54 assert(Ty->isIntegerTy());
55
56 unsigned BitSize = Ty->getPrimitiveSizeInBits();
57 if (BitSize == 0)
58 return ~0U;
59
60 if (Imm == 0)
61 return TTI::TCC_Free;
62
63 if (Imm.getBitWidth() <= 64) {
64 if (isInt<16>(Imm.getSExtValue()))
65 return TTI::TCC_Basic;
66
67 if (isInt<32>(Imm.getSExtValue())) {
68 // A constant that can be materialized using lis.
69 if ((Imm.getZExtValue() & 0xFFFF) == 0)
70 return TTI::TCC_Basic;
71
72 return 2 * TTI::TCC_Basic;
73 }
74 }
75
76 return 4 * TTI::TCC_Basic;
77}
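
Taken together, the tiers above say: zero is free, anything that fits a signed 16-bit field costs one instruction (li), a 32-bit value costs one if its low halfword is zero (lis) and two otherwise (lis + ori), and everything else costs four. A standalone sketch with plain integers standing in for APInt (ppcImmCost is an illustrative name):

#include <cstdint>
#include <iostream>

int ppcImmCost(int64_t Imm) {
  if (Imm == 0)
    return 0;                                   // TCC_Free
  if (Imm >= INT16_MIN && Imm <= INT16_MAX)
    return 1;                                   // one li
  if (Imm >= INT32_MIN && Imm <= INT32_MAX)
    return ((static_cast<uint64_t>(Imm) & 0xFFFF) == 0) ? 1 : 2; // lis / lis+ori
  return 4;                                     // general 64-bit materialization
}

int main() {
  std::cout << ppcImmCost(0) << ppcImmCost(42) << ppcImmCost(0x10000)
            << ppcImmCost(0x123456789LL) << '\n'; // prints 0114
}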
78
79int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
80 Type *Ty) {
81 if (DisablePPCConstHoist)
82 return BaseT::getIntImmCost(IID, Idx, Imm, Ty);
83
 84 assert(Ty->isIntegerTy());
85
86 unsigned BitSize = Ty->getPrimitiveSizeInBits();
87 if (BitSize == 0)
88 return ~0U;
89
90 switch (IID) {
91 default:
92 return TTI::TCC_Free;
93 case Intrinsic::sadd_with_overflow:
94 case Intrinsic::uadd_with_overflow:
95 case Intrinsic::ssub_with_overflow:
96 case Intrinsic::usub_with_overflow:
97 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
98 return TTI::TCC_Free;
99 break;
100 case Intrinsic::experimental_stackmap:
101 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
102 return TTI::TCC_Free;
103 break;
104 case Intrinsic::experimental_patchpoint_void:
105 case Intrinsic::experimental_patchpoint_i64:
106 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
107 return TTI::TCC_Free;
108 break;
109 }
110 return PPCTTIImpl::getIntImmCost(Imm, Ty);
111}
112
113int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
114 Type *Ty) {
115 if (DisablePPCConstHoist)
116 return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);
117
 118 assert(Ty->isIntegerTy());
119
120 unsigned BitSize = Ty->getPrimitiveSizeInBits();
121 if (BitSize == 0)
122 return ~0U;
123
124 unsigned ImmIdx = ~0U;
125 bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
126 ZeroFree = false;
127 switch (Opcode) {
128 default:
129 return TTI::TCC_Free;
130 case Instruction::GetElementPtr:
131 // Always hoist the base address of a GetElementPtr. This prevents the
132 // creation of new constants for every base constant that gets constant
133 // folded with the offset.
134 if (Idx == 0)
135 return 2 * TTI::TCC_Basic;
136 return TTI::TCC_Free;
137 case Instruction::And:
138 RunFree = true; // (for the rotate-and-mask instructions)
 139 LLVM_FALLTHROUGH;
140 case Instruction::Add:
141 case Instruction::Or:
142 case Instruction::Xor:
143 ShiftedFree = true;
 144 LLVM_FALLTHROUGH;
145 case Instruction::Sub:
146 case Instruction::Mul:
147 case Instruction::Shl:
148 case Instruction::LShr:
149 case Instruction::AShr:
150 ImmIdx = 1;
151 break;
152 case Instruction::ICmp:
153 UnsignedFree = true;
154 ImmIdx = 1;
155 // Zero comparisons can use record-form instructions.
 156 LLVM_FALLTHROUGH;
157 case Instruction::Select:
158 ZeroFree = true;
159 break;
160 case Instruction::PHI:
161 case Instruction::Call:
162 case Instruction::Ret:
163 case Instruction::Load:
164 case Instruction::Store:
165 break;
166 }
167
168 if (ZeroFree && Imm == 0)
169 return TTI::TCC_Free;
170
171 if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
172 if (isInt<16>(Imm.getSExtValue()))
173 return TTI::TCC_Free;
174
175 if (RunFree) {
176 if (Imm.getBitWidth() <= 32 &&
177 (isShiftedMask_32(Imm.getZExtValue()) ||
178 isShiftedMask_32(~Imm.getZExtValue())))
179 return TTI::TCC_Free;
180
181 if (ST->isPPC64() &&
182 (isShiftedMask_64(Imm.getZExtValue()) ||
183 isShiftedMask_64(~Imm.getZExtValue())))
184 return TTI::TCC_Free;
185 }
186
187 if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
188 return TTI::TCC_Free;
189
190 if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
191 return TTI::TCC_Free;
192 }
193
194 return PPCTTIImpl::getIntImmCost(Imm, Ty);
195}
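
The RunFree case accepts constants whose set bits, or whose complement's set bits, form a single contiguous run, because such masks fold into one rotate-and-mask instruction. The run test, written as in llvm/Support/MathExtras.h:

#include <cstdint>

// A mask is a run of low set bits; a shifted mask is a run anywhere.
bool isMask32(uint32_t V) { return V && ((V + 1) & V) == 0; }           // e.g. 0x00FF
bool isShiftedMask32(uint32_t V) { return V && isMask32((V - 1) | V); } // e.g. 0x0FF0
// isShiftedMask32(0x0FF0) == true, isShiftedMask32(0x0FF1) == false.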
196
197unsigned PPCTTIImpl::getUserCost(const User *U,
198 ArrayRef<const Value *> Operands) {
199 if (U->getType()->isVectorTy()) {
1. Taking false branch
200 // Instructions that need to be split should cost more.
201 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
202 return LT.first * BaseT::getUserCost(U, Operands);
203 }
204
205 return BaseT::getUserCost(U, Operands);
2. Calling 'TargetTransformInfoImplCRTPBase::getUserCost'
206}
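
The multiplier reflects type legalization: a vector wider than the target's registers splits into LT.first parts, and each part pays the base cost. A worked example (the 128-bit register width is an assumption for illustration, and splitUserCost is a made-up name):

// A v4i64 legalizes to two v2i64 parts on a 128-bit-register subtarget,
// so LT.first == 2 and the base user cost doubles.
unsigned splitUserCost(unsigned LegalParts, unsigned BaseCost) {
  return LegalParts * BaseCost; // splitUserCost(2, 1) == 2
}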
207
208void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
209 TTI::UnrollingPreferences &UP) {
210 if (ST->getDarwinDirective() == PPC::DIR_A2) {
211 // The A2 is in-order with a deep pipeline, and concatenation unrolling
212 // helps expose latency-hiding opportunities to the instruction scheduler.
213 UP.Partial = UP.Runtime = true;
214
215 // We unroll a lot on the A2 (hundreds of instructions), and the benefits
216 // often outweigh the cost of a division to compute the trip count.
217 UP.AllowExpensiveTripCount = true;
218 }
219
220 BaseT::getUnrollingPreferences(L, SE, UP);
221}
222
223// This function returns true to allow using coldcc calling convention.
224// Returning true results in coldcc being used for functions which are cold at
225// all call sites when the callers of the functions are not calling any other
226// non coldcc functions.
227bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
228 return EnablePPCColdCC;
229}
230
231bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
232 // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
233 // on combining the loads generated for consecutive accesses, and failure to
234 // do so is particularly expensive. This makes it much more likely (compared
235 // to only using concatenation unrolling).
236 if (ST->getDarwinDirective() == PPC::DIR_A2)
237 return true;
238
239 return LoopHasReductions;
240}
241
242const PPCTTIImpl::TTI::MemCmpExpansionOptions *
243PPCTTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
244 static const auto Options = []() {
245 TTI::MemCmpExpansionOptions Options;
246 Options.LoadSizes.push_back(8);
247 Options.LoadSizes.push_back(4);
248 Options.LoadSizes.push_back(2);
249 Options.LoadSizes.push_back(1);
250 return Options;
251 }();
252 return &Options;
253}
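
The same initialize-once idiom in isolation: a function-local static built by an immediately-invoked lambda, initialized exactly once and thread-safe since C++11. A sketch (memcmpLoadSizes is an illustrative name):

#include <vector>

const std::vector<unsigned> &memcmpLoadSizes() {
  static const auto Sizes = [] {
    return std::vector<unsigned>{8, 4, 2, 1}; // prefer the widest loads first
  }();
  return Sizes;
}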
254
255bool PPCTTIImpl::enableInterleavedAccessVectorization() {
256 return true;
257}
258
259unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
260 if (Vector && !ST->hasAltivec() && !ST->hasQPX())
261 return 0;
262 return ST->hasVSX() ? 64 : 32;
263}
264
265unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
266 if (Vector) {
267 if (ST->hasQPX()) return 256;
268 if (ST->hasAltivec()) return 128;
269 return 0;
270 }
271
272 if (ST->isPPC64())
273 return 64;
274 return 32;
275
276}
277
278unsigned PPCTTIImpl::getCacheLineSize() {
279 // Check first if the user specified a custom line size.
280 if (CacheLineSize.getNumOccurrences() > 0)
281 return CacheLineSize;
282
283 // On P7, P8 or P9 we have a cache line size of 128.
284 unsigned Directive = ST->getDarwinDirective();
285 if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
286 Directive == PPC::DIR_PWR9)
287 return 128;
288
289 // On other processors return a default of 64 bytes.
290 return 64;
291}
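
The decision above reduces to a strict precedence: an explicit -ppc-loop-prefetch-cache-line value wins, then the directive table. Compressed into a sketch (parameter names are illustrative):

unsigned cacheLineSize(bool UserSet, unsigned UserVal, bool IsPwr7to9) {
  if (UserSet)
    return UserVal;            // an explicit cl::opt occurrence beats everything
  return IsPwr7to9 ? 128 : 64; // POWER7/8/9 vs. the default
}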
292
293unsigned PPCTTIImpl::getPrefetchDistance() {
294 // This seems like a reasonable default for the BG/Q (this pass is enabled, by
295 // default, only on the BG/Q).
296 return 300;
297}
298
299unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
300 unsigned Directive = ST->getDarwinDirective();
301 // The 440 has no SIMD support, but floating-point instructions
302 // have a 5-cycle latency, so unroll by 5x for latency hiding.
303 if (Directive == PPC::DIR_440)
304 return 5;
305
306 // The A2 has no SIMD support, but floating-point instructions
307 // have a 6-cycle latency, so unroll by 6x for latency hiding.
308 if (Directive == PPC::DIR_A2)
309 return 6;
310
311 // FIXME: For lack of any better information, do no harm...
312 if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
313 return 1;
314
315 // For P7 and P8, floating-point instructions have a 6-cycle latency and
316 // there are two execution units, so unroll by 12x for latency hiding.
317 // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
318 if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
319 Directive == PPC::DIR_PWR9)
320 return 12;
321
322 // For most things, modern systems have two execution units (and
323 // out-of-order execution).
324 return 2;
325}
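
Each directive value factors as floating-point latency times the number of FP execution units; that factorization is an inference from the comments above, not something the code states:

unsigned maxInterleave(unsigned FPLatencyCycles, unsigned FPUnits) {
  return FPLatencyCycles * FPUnits; // 5*1 (440), 6*1 (A2), 6*2 (P7/P8/P9)
}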
326
327int PPCTTIImpl::getArithmeticInstrCost(
328 unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
329 TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
330 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
 331 assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
332
333 // Fallback to the default implementation.
334 return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
335 Opd1PropInfo, Opd2PropInfo);
336}
337
338int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
339 Type *SubTp) {
340 // Legalize the type.
341 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
342
343 // PPC, for both Altivec/VSX and QPX, support cheap arbitrary permutations
344 // (at least in the sense that there need only be one non-loop-invariant
345 // instruction). We need one such shuffle instruction for each actual
346 // register (this is not true for arbitrary shuffles, but is true for the
347 // structured types of shuffles covered by TTI::ShuffleKind).
348 return LT.first;
349}
350
351int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
352 const Instruction *I) {
 353 assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
354
355 return BaseT::getCastInstrCost(Opcode, Dst, Src);
356}
357
358int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
359 const Instruction *I) {
360 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
361}
362
363int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
 364 assert(Val->isVectorTy() && "This must be a vector type");
365
366 int ISD = TLI->InstructionOpcodeToISD(Opcode);
 367 assert(ISD && "Invalid opcode");
368
369 if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
370 // Double-precision scalars are already located in index #0.
371 if (Index == 0)
372 return 0;
373
374 return BaseT::getVectorInstrCost(Opcode, Val, Index);
375 } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
376 // Floating point scalars are already located in index #0.
377 if (Index == 0)
378 return 0;
379
380 return BaseT::getVectorInstrCost(Opcode, Val, Index);
381 }
382
383 // Estimated cost of a load-hit-store delay. This was obtained
384 // experimentally as a minimum needed to prevent unprofitable
385 // vectorization for the paq8p benchmark. It may need to be
386 // raised further if other unprofitable cases remain.
387 unsigned LHSPenalty = 2;
388 if (ISD == ISD::INSERT_VECTOR_ELT)
389 LHSPenalty += 7;
390
391 // Vector element insert/extract with Altivec is very expensive,
392 // because they require store and reload with the attendant
393 // processor stall for load-hit-store. Until VSX is available,
394 // these need to be estimated as very costly.
395 if (ISD == ISD::EXTRACT_VECTOR_ELT ||
396 ISD == ISD::INSERT_VECTOR_ELT)
397 return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);
398
399 return BaseT::getVectorInstrCost(Opcode, Val, Index);
400}
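
In numbers: relative to the base cost, an Altivec extract pays the load-hit-store delay of 2, and an insert pays 2 + 7 = 9 (constants tuned on paq8p, per the comment above). The penalty arithmetic in isolation (altivecElementCost is an illustrative name):

unsigned altivecElementCost(bool IsInsert, unsigned BaseCost) {
  unsigned LHSPenalty = 2; // load-hit-store delay
  if (IsInsert)
    LHSPenalty += 7;       // inserts are costlier still
  return LHSPenalty + BaseCost;
}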
401
402int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
403 unsigned AddressSpace, const Instruction *I) {
404 // Legalize the type.
405 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
 406 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
 407 "Invalid Opcode");
408
409 int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
410
411 bool IsAltivecType = ST->hasAltivec() &&
412 (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
413 LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
414 bool IsVSXType = ST->hasVSX() &&
415 (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
416 bool IsQPXType = ST->hasQPX() &&
417 (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);
418
419 // VSX has 32b/64b load instructions. Legalization can handle loading of
420 // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
421 // PPCTargetLowering can't compute the cost appropriately. So here we
422 // explicitly check this case.
423 unsigned MemBytes = Src->getPrimitiveSizeInBits();
424 if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
425 (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
426 return 1;
427
428 // Aligned loads and stores are easy.
429 unsigned SrcBytes = LT.second.getStoreSize();
430 if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
431 return Cost;
432
433 // If we can use the permutation-based load sequence, then this is also
434 // relatively cheap (not counting loop-invariant instructions): one load plus
435 // one permute (the last load in a series has extra cost, but we're
436 // neglecting that here). Note that on the P7, we could do unaligned loads
437 // for Altivec types using the VSX instructions, but that's more expensive
438 // than using the permutation-based load sequence. On the P8, that's no
439 // longer true.
440 if (Opcode == Instruction::Load &&
441 ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
442 Alignment >= LT.second.getScalarType().getStoreSize())
443 return Cost + LT.first; // Add the cost of the permutations.
444
445 // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
446 // P7, unaligned vector loads are more expensive than the permutation-based
447 // load sequence, so that might be used instead, but regardless, the net cost
448 // is about the same (not counting loop-invariant instructions).
449 if (IsVSXType || (ST->hasVSX() && IsAltivecType))
450 return Cost;
451
452 // Newer PPC supports unaligned memory access.
453 if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
454 return Cost;
455
456 // PPC in general does not support unaligned loads and stores. They'll need
457 // to be decomposed based on the alignment factor.
458
459 // Add the cost of each scalar load or store.
460 Cost += LT.first*(SrcBytes/Alignment-1);
461
462 // For a vector type, there is also scalarization overhead (only for
463 // stores, loads are expanded using the vector-load + permutation sequence,
464 // which is much less expensive).
465 if (Src->isVectorTy() && Opcode == Instruction::Store)
466 for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
467 Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);
468
469 return Cost;
470}
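
The decomposition term on line 460 charges one extra operation per Alignment-sized piece beyond the first, scaled by the number of legalized parts: a 16-byte access at 4-byte alignment with LT.first == 1 adds 3. As a standalone helper (misalignedExtra is a made-up name):

unsigned misalignedExtra(unsigned LegalParts, unsigned SrcBytes, unsigned Align) {
  return LegalParts * (SrcBytes / Align - 1); // misalignedExtra(1, 16, 4) == 3
}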
471
472int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
473 unsigned Factor,
474 ArrayRef<unsigned> Indices,
475 unsigned Alignment,
476 unsigned AddressSpace) {
 477 assert(isa<VectorType>(VecTy) &&
 478 "Expect a vector type for interleaved memory op");
479
480 // Legalize the type.
481 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);
482
483 // Firstly, the cost of load/store operation.
484 int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);
485
486 // PPC, for both Altivec/VSX and QPX, support cheap arbitrary permutations
487 // (at least in the sense that there need only be one non-loop-invariant
488 // instruction). For each result vector, we need one shuffle per incoming
489 // vector (except that the first shuffle can take two incoming vectors
490 // because it does not need to take itself).
491 Cost += Factor*(LT.first-1);
492
493 return Cost;
494}
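
The shuffle term on line 491 charges Factor * (LT.first - 1): one shuffle per incoming register beyond the first, for each of the Factor result vectors. For example, Factor == 2 on a type legalized into two registers adds two shuffles (interleaveShuffles is an illustrative name):

unsigned interleaveShuffles(unsigned Factor, unsigned LegalParts) {
  return Factor * (LegalParts - 1); // interleaveShuffles(2, 2) == 2
}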
495

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/Analysis/TargetTransformInfoImpl.h

1//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10/// This file provides helpers for the implementation of
11/// a TargetTransformInfo-conforming class.
12///
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
16#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
17
18#include "llvm/Analysis/ScalarEvolutionExpressions.h"
19#include "llvm/Analysis/TargetTransformInfo.h"
20#include "llvm/Analysis/VectorUtils.h"
21#include "llvm/IR/CallSite.h"
22#include "llvm/IR/DataLayout.h"
23#include "llvm/IR/Function.h"
24#include "llvm/IR/GetElementPtrTypeIterator.h"
25#include "llvm/IR/Operator.h"
26#include "llvm/IR/Type.h"
27
28namespace llvm {
29
30/// \brief Base class for use as a mix-in that aids implementing
31/// a TargetTransformInfo-compatible class.
32class TargetTransformInfoImplBase {
33protected:
34 typedef TargetTransformInfo TTI;
35
36 const DataLayout &DL;
37
38 explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}
39
40public:
41 // Provide value semantics. MSVC requires that we spell all of these out.
42 TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
43 : DL(Arg.DL) {}
44 TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}
45
46 const DataLayout &getDataLayout() const { return DL; }
47
48 unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
49 switch (Opcode) {
16. Control jumps to 'case IntToPtr:' at line 74
50 default:
51 // By default, just classify everything as 'basic'.
52 return TTI::TCC_Basic;
53
54 case Instruction::GetElementPtr:
 55 llvm_unreachable("Use getGEPCost for GEP operations!");
56
57 case Instruction::BitCast:
 58 assert(OpTy && "Cast instructions must provide the operand type");
59 if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
60 // Identity and pointer-to-pointer casts are free.
61 return TTI::TCC_Free;
62
63 // Otherwise, the default basic cost is used.
64 return TTI::TCC_Basic;
65
66 case Instruction::FDiv:
67 case Instruction::FRem:
68 case Instruction::SDiv:
69 case Instruction::SRem:
70 case Instruction::UDiv:
71 case Instruction::URem:
72 return TTI::TCC_Expensive;
73
74 case Instruction::IntToPtr: {
75 // An inttoptr cast is free so long as the input is a legal integer type
76 // which doesn't contain values outside the range of a pointer.
77 unsigned OpSize = OpTy->getScalarSizeInBits();
17. Called C++ object pointer is null
78 if (DL.isLegalInteger(OpSize) &&
79 OpSize <= DL.getPointerTypeSizeInBits(Ty))
80 return TTI::TCC_Free;
81
82 // Otherwise it's not a no-op.
83 return TTI::TCC_Basic;
84 }
85 case Instruction::PtrToInt: {
86 // A ptrtoint cast is free so long as the result is large enough to store
87 // the pointer, and a legal integer type.
88 unsigned DestSize = Ty->getScalarSizeInBits();
89 if (DL.isLegalInteger(DestSize) &&
90 DestSize >= DL.getPointerTypeSizeInBits(OpTy))
91 return TTI::TCC_Free;
92
93 // Otherwise it's not a no-op.
94 return TTI::TCC_Basic;
95 }
96 case Instruction::Trunc:
97 // trunc to a native type is free (assuming the target has compare and
98 // shift-right of the same width).
99 if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
100 return TTI::TCC_Free;
101
102 return TTI::TCC_Basic;
103 }
104 }
105
106 int getGEPCost(Type *PointeeType, const Value *Ptr,
107 ArrayRef<const Value *> Operands) {
108 // In the basic model, we just assume that all-constant GEPs will be folded
109 // into their uses via addressing modes.
110 for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
111 if (!isa<Constant>(Operands[Idx]))
112 return TTI::TCC_Basic;
113
114 return TTI::TCC_Free;
115 }
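
In other words, the basic model treats a GEP as free exactly when every index is constant, on the assumption that such GEPs fold into addressing modes. The predicate in isolation (gepIsFree is an illustrative name):

#include <vector>

bool gepIsFree(const std::vector<bool> &IndexIsConstant) {
  for (bool C : IndexIsConstant)
    if (!C)
      return false; // any non-constant index costs TCC_Basic
  return true;      // all-constant GEPs are assumed folded into addressing modes
}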
116
117 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
118 unsigned &JTSize) {
119 JTSize = 0;
120 return SI.getNumCases();
121 }
122
123 int getExtCost(const Instruction *I, const Value *Src) {
124 return TTI::TCC_Basic;
125 }
126
127 unsigned getCallCost(FunctionType *FTy, int NumArgs) {
 128 assert(FTy && "FunctionType must be provided to this routine.");
129
130 // The target-independent implementation just measures the size of the
131 // function by approximating that each argument will take on average one
132 // instruction to prepare.
133
134 if (NumArgs < 0)
135 // Set the argument number to the number of explicit arguments in the
136 // function.
137 NumArgs = FTy->getNumParams();
138
139 return TTI::TCC_Basic * (NumArgs + 1);
140 }
141
142 unsigned getInliningThresholdMultiplier() { return 1; }
143
144 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
145 ArrayRef<Type *> ParamTys) {
146 switch (IID) {
147 default:
148 // Intrinsics rarely (if ever) have normal argument setup constraints.
149 // Model them as having a basic instruction cost.
150 // FIXME: This is wrong for libc intrinsics.
151 return TTI::TCC_Basic;
152
153 case Intrinsic::annotation:
154 case Intrinsic::assume:
155 case Intrinsic::sideeffect:
156 case Intrinsic::dbg_declare:
157 case Intrinsic::dbg_value:
158 case Intrinsic::invariant_start:
159 case Intrinsic::invariant_end:
160 case Intrinsic::lifetime_start:
161 case Intrinsic::lifetime_end:
162 case Intrinsic::objectsize:
163 case Intrinsic::ptr_annotation:
164 case Intrinsic::var_annotation:
165 case Intrinsic::experimental_gc_result:
166 case Intrinsic::experimental_gc_relocate:
167 case Intrinsic::coro_alloc:
168 case Intrinsic::coro_begin:
169 case Intrinsic::coro_free:
170 case Intrinsic::coro_end:
171 case Intrinsic::coro_frame:
172 case Intrinsic::coro_size:
173 case Intrinsic::coro_suspend:
174 case Intrinsic::coro_param:
175 case Intrinsic::coro_subfn_addr:
176 // These intrinsics don't actually represent code after lowering.
177 return TTI::TCC_Free;
178 }
179 }
180
181 bool hasBranchDivergence() { return false; }
182
183 bool isSourceOfDivergence(const Value *V) { return false; }
184
185 bool isAlwaysUniform(const Value *V) { return false; }
186
187 unsigned getFlatAddressSpace () {
188 return -1;
189 }
190
191 bool isLoweredToCall(const Function *F) {
 192 assert(F && "A concrete function must be provided to this routine.");
193
194 // FIXME: These should almost certainly not be handled here, and instead
195 // handled with the help of TLI or the target itself. This was largely
196 // ported from existing analysis heuristics here so that such refactorings
197 // can take place in the future.
198
199 if (F->isIntrinsic())
200 return false;
201
202 if (F->hasLocalLinkage() || !F->hasName())
203 return true;
204
205 StringRef Name = F->getName();
206
207 // These will all likely lower to a single selection DAG node.
208 if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
209 Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
210 Name == "fmin" || Name == "fminf" || Name == "fminl" ||
211 Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
212 Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
213 Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
214 return false;
215
216 // These are all likely to be optimized into something smaller.
217 if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
218 Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
219 Name == "floorf" || Name == "ceil" || Name == "round" ||
220 Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
221 Name == "llabs")
222 return false;
223
224 return true;
225 }
226
227 void getUnrollingPreferences(Loop *, ScalarEvolution &,
228 TTI::UnrollingPreferences &) {}
229
230 bool isLegalAddImmediate(int64_t Imm) { return false; }
231
232 bool isLegalICmpImmediate(int64_t Imm) { return false; }
233
234 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
235 bool HasBaseReg, int64_t Scale,
236 unsigned AddrSpace, Instruction *I = nullptr) {
237 // Guess that only reg and reg+reg addressing is allowed. This heuristic is
238 // taken from the implementation of LSR.
239 return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
240 }
241
242 bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
243 return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
244 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
245 std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
246 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
247 }
248
249 bool canMacroFuseCmp() { return false; }
250
251 bool shouldFavorPostInc() const { return false; }
252
253 bool isLegalMaskedStore(Type *DataType) { return false; }
254
255 bool isLegalMaskedLoad(Type *DataType) { return false; }
256
257 bool isLegalMaskedScatter(Type *DataType) { return false; }
258
259 bool isLegalMaskedGather(Type *DataType) { return false; }
260
261 bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }
262
263 bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }
264
265 bool prefersVectorizedAddressing() { return true; }
266
267 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
268 bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
269 // Guess that all legal addressing mode are free.
270 if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
271 Scale, AddrSpace))
272 return 0;
273 return -1;
274 }
275
276 bool LSRWithInstrQueries() { return false; }
277
278 bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
279
280 bool isProfitableToHoist(Instruction *I) { return true; }
281
282 bool useAA() { return false; }
283
284 bool isTypeLegal(Type *Ty) { return false; }
285
286 unsigned getJumpBufAlignment() { return 0; }
287
288 unsigned getJumpBufSize() { return 0; }
289
290 bool shouldBuildLookupTables() { return true; }
291 bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }
292
293 bool useColdCCForColdCall(Function &F) { return false; }
294
295 unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
296 return 0;
297 }
298
299 unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
300 unsigned VF) { return 0; }
301
302 bool supportsEfficientVectorElementLoadStore() { return false; }
303
304 bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }
305
306 const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
307 bool IsZeroCmp) const {
308 return nullptr;
309 }
310
311 bool enableInterleavedAccessVectorization() { return false; }
312
313 bool isFPVectorizationPotentiallyUnsafe() { return false; }
314
315 bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
316 unsigned BitWidth,
317 unsigned AddressSpace,
318 unsigned Alignment,
319 bool *Fast) { return false; }
320
321 TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
322 return TTI::PSK_Software;
323 }
324
325 bool haveFastSqrt(Type *Ty) { return false; }
326
327 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }
328
329 unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
330
331 int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
332 Type *Ty) {
333 return 0;
334 }
335
336 unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }
337
338 unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
339 Type *Ty) {
340 return TTI::TCC_Free;
341 }
342
343 unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
344 Type *Ty) {
345 return TTI::TCC_Free;
346 }
347
348 unsigned getNumberOfRegisters(bool Vector) { return 8; }
349
350 unsigned getRegisterBitWidth(bool Vector) const { return 32; }
351
352 unsigned getMinVectorRegisterBitWidth() { return 128; }
353
354 bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }
355
356 bool
357 shouldConsiderAddressTypePromotion(const Instruction &I,
358 bool &AllowPromotionWithoutCommonHeader) {
359 AllowPromotionWithoutCommonHeader = false;
360 return false;
361 }
362
363 unsigned getCacheLineSize() { return 0; }
364
365 llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
366 switch (Level) {
367 case TargetTransformInfo::CacheLevel::L1D:
 368 LLVM_FALLTHROUGH;
369 case TargetTransformInfo::CacheLevel::L2D:
370 return llvm::Optional<unsigned>();
371 }
372
 373 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
374 }
375
376 llvm::Optional<unsigned> getCacheAssociativity(
377 TargetTransformInfo::CacheLevel Level) {
378 switch (Level) {
379 case TargetTransformInfo::CacheLevel::L1D:
 380 LLVM_FALLTHROUGH;
381 case TargetTransformInfo::CacheLevel::L2D:
382 return llvm::Optional<unsigned>();
383 }
384
 385 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
386 }
387
388 unsigned getPrefetchDistance() { return 0; }
389
390 unsigned getMinPrefetchStride() { return 1; }
391
 392 unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }
393
394 unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
395
396 unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
397 TTI::OperandValueKind Opd1Info,
398 TTI::OperandValueKind Opd2Info,
399 TTI::OperandValueProperties Opd1PropInfo,
400 TTI::OperandValueProperties Opd2PropInfo,
401 ArrayRef<const Value *> Args) {
402 return 1;
403 }
404
405 unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
406 Type *SubTp) {
407 return 1;
408 }
409
410 unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
411 const Instruction *I) { return 1; }
412
413 unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
414 VectorType *VecTy, unsigned Index) {
415 return 1;
416 }
417
418 unsigned getCFInstrCost(unsigned Opcode) { return 1; }
419
420 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
421 const Instruction *I) {
422 return 1;
423 }
424
425 unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
426 return 1;
427 }
428
429 unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
430 unsigned AddressSpace, const Instruction *I) {
431 return 1;
432 }
433
434 unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
435 unsigned AddressSpace) {
436 return 1;
437 }
438
439 unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
440 bool VariableMask,
441 unsigned Alignment) {
442 return 1;
443 }
444
445 unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
446 unsigned Factor,
447 ArrayRef<unsigned> Indices,
448 unsigned Alignment,
449 unsigned AddressSpace) {
450 return 1;
451 }
452
453 unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
454 ArrayRef<Type *> Tys, FastMathFlags FMF,
455 unsigned ScalarizationCostPassed) {
456 return 1;
457 }
458 unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
459 ArrayRef<Value *> Args, FastMathFlags FMF, unsigned VF) {
460 return 1;
461 }
462
463 unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
464 return 1;
465 }
466
467 unsigned getNumberOfParts(Type *Tp) { return 0; }
468
469 unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
470 const SCEV *) {
471 return 0;
472 }
473
474 unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }
475
476 unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }
477
478 unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
479
480 bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
481 return false;
482 }
483
484 unsigned getAtomicMemIntrinsicMaxElementSize() const {
485 // Note for overrides: You must ensure for all element unordered-atomic
486 // memory intrinsics that all power-of-2 element sizes up to, and
487 // including, the return value of this method have a corresponding
488 // runtime lib call. These runtime lib call definitions can be found
489 // in RuntimeLibcalls.h
490 return 0;
491 }
492
493 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
494 Type *ExpectedType) {
495 return nullptr;
496 }
497
498 Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
499 unsigned SrcAlign, unsigned DestAlign) const {
500 return Type::getInt8Ty(Context);
501 }
502
503 void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
504 LLVMContext &Context,
505 unsigned RemainingBytes,
506 unsigned SrcAlign,
507 unsigned DestAlign) const {
508 for (unsigned i = 0; i != RemainingBytes; ++i)
509 OpsOut.push_back(Type::getInt8Ty(Context));
510 }
511
512 bool areInlineCompatible(const Function *Caller,
513 const Function *Callee) const {
514 return (Caller->getFnAttribute("target-cpu") ==
515 Callee->getFnAttribute("target-cpu")) &&
516 (Caller->getFnAttribute("target-features") ==
517 Callee->getFnAttribute("target-features"));
518 }
519
520 bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
521 const DataLayout &DL) const {
522 return false;
523 }
524
525 bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
526 const DataLayout &DL) const {
527 return false;
528 }
529
530 unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }
531
532 bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }
533
534 bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }
535
536 bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
537 unsigned Alignment,
538 unsigned AddrSpace) const {
539 return true;
540 }
541
542 bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
543 unsigned Alignment,
544 unsigned AddrSpace) const {
545 return true;
546 }
547
548 unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
549 unsigned ChainSizeInBytes,
550 VectorType *VecTy) const {
551 return VF;
552 }
553
554 unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
555 unsigned ChainSizeInBytes,
556 VectorType *VecTy) const {
557 return VF;
558 }
559
560 bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
561 TTI::ReductionFlags Flags) const {
562 return false;
563 }
564
565 bool shouldExpandReduction(const IntrinsicInst *II) const {
566 return true;
567 }
568
569protected:
570 // Obtain the minimum required size to hold the value (without the sign)
571 // In case of a vector it returns the min required size for one element.
572 unsigned minRequiredElementSize(const Value* Val, bool &isSigned) {
573 if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
574 const auto* VectorValue = cast<Constant>(Val);
575
576 // In case of a vector need to pick the max between the min
577 // required size for each element
578 auto *VT = cast<VectorType>(Val->getType());
579
580 // Assume unsigned elements
581 isSigned = false;
582
583 // The max required size is the total vector width divided by num
584 // of elements in the vector
585 unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();
586
587 unsigned MinRequiredSize = 0;
588 for(unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
589 if (auto* IntElement =
590 dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
591 bool signedElement = IntElement->getValue().isNegative();
592 // Get the element min required size.
593 unsigned ElementMinRequiredSize =
594 IntElement->getValue().getMinSignedBits() - 1;
595 // In case one element is signed then all the vector is signed.
596 isSigned |= signedElement;
597 // Save the max required bit size between all the elements.
598 MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
599 }
600 else {
601 // not an int constant element
602 return MaxRequiredSize;
603 }
604 }
605 return MinRequiredSize;
606 }
607
608 if (const auto* CI = dyn_cast<ConstantInt>(Val)) {
609 isSigned = CI->getValue().isNegative();
610 return CI->getValue().getMinSignedBits() - 1;
611 }
612
613 if (const auto* Cast = dyn_cast<SExtInst>(Val)) {
614 isSigned = true;
615 return Cast->getSrcTy()->getScalarSizeInBits() - 1;
616 }
617
618 if (const auto* Cast = dyn_cast<ZExtInst>(Val)) {
619 isSigned = false;
620 return Cast->getSrcTy()->getScalarSizeInBits();
621 }
622
623 isSigned = false;
624 return Val->getType()->getScalarSizeInBits();
625 }
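
For a scalar ConstantInt the helper returns getMinSignedBits() - 1, i.e. the number of value bits excluding the sign: 255 reports 8 unsigned bits, -4 reports 2 signed bits. A standalone equivalent over int64_t (an assumption: plain integers in place of APInt):

#include <cstdint>

unsigned minRequiredBits(int64_t V, bool &IsSigned) {
  IsSigned = V < 0;
  // Magnitude bits excluding the sign: 255 -> 8 (unsigned), -4 -> 2 (signed).
  uint64_t Mag = IsSigned ? ~static_cast<uint64_t>(V) : static_cast<uint64_t>(V);
  unsigned Bits = 0;
  while (Mag >> Bits)
    ++Bits;
  return Bits;
}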
626
627 bool isStridedAccess(const SCEV *Ptr) {
628 return Ptr && isa<SCEVAddRecExpr>(Ptr);
629 }
630
631 const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
632 const SCEV *Ptr) {
633 if (!isStridedAccess(Ptr))
634 return nullptr;
635 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
636 return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
637 }
638
639 bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
640 int64_t MergeDistance) {
641 const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
642 if (!Step)
643 return false;
644 APInt StrideVal = Step->getAPInt();
645 if (StrideVal.getBitWidth() > 64)
646 return false;
647 // FIXME: Need to take absolute value for negative stride case.
648 return StrideVal.getSExtValue() < MergeDistance;
649 }
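
The FIXME above notes that the comparison should use the stride's magnitude so negative strides are handled; a sketch of that handling (strideWithinDistance is an illustrative name, and INT64_MIN remains a corner case):

#include <cstdint>
#include <cstdlib>

bool strideWithinDistance(int64_t Stride, int64_t MergeDistance) {
  // Compare the magnitude, as the FIXME requests; std::llabs is
  // undefined for INT64_MIN, which would need a separate check.
  return std::llabs(Stride) < MergeDistance;
}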
650};
651
652/// \brief CRTP base class for use as a mix-in that aids implementing
653/// a TargetTransformInfo-compatible class.
654template <typename T>
655class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
656private:
657 typedef TargetTransformInfoImplBase BaseT;
658
659protected:
660 explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}
661
662public:
663 using BaseT::getCallCost;
664
665 unsigned getCallCost(const Function *F, int NumArgs) {
 666 assert(F && "A concrete function must be provided to this routine.");
667
668 if (NumArgs < 0)
669 // Set the argument number to the number of explicit arguments in the
670 // function.
671 NumArgs = F->arg_size();
672
673 if (Intrinsic::ID IID = F->getIntrinsicID()) {
674 FunctionType *FTy = F->getFunctionType();
675 SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
676 return static_cast<T *>(this)
677 ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
678 }
679
680 if (!static_cast<T *>(this)->isLoweredToCall(F))
681 return TTI::TCC_Basic; // Give a basic cost if it will be lowered
682 // directly.
683
684 return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
685 }
686
687 unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
688 // Simply delegate to generic handling of the call.
689 // FIXME: We should use instsimplify or something else to catch calls which
690 // will constant fold with these arguments.
691 return static_cast<T *>(this)->getCallCost(F, Arguments.size());
692 }
693
694 using BaseT::getGEPCost;
695
696 int getGEPCost(Type *PointeeType, const Value *Ptr,
697 ArrayRef<const Value *> Operands) {
698 const GlobalValue *BaseGV = nullptr;
699 if (Ptr != nullptr) {
700 // TODO: will remove this when pointers have an opaque type.
 701 assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
 702 PointeeType &&
 703 "explicit pointee type doesn't match operand's pointee type");
704 BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
705 }
706 bool HasBaseReg = (BaseGV == nullptr);
707
708 auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
709 APInt BaseOffset(PtrSizeBits, 0);
710 int64_t Scale = 0;
711
712 auto GTI = gep_type_begin(PointeeType, Operands);
713 Type *TargetType = nullptr;
714
715 // Handle the case where the GEP instruction has a single operand,
716 // the basis, therefore TargetType is a nullptr.
717 if (Operands.empty())
718 return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;
719
720 for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
721 TargetType = GTI.getIndexedType();
722 // We assume that the cost of Scalar GEP with constant index and the
723 // cost of Vector GEP with splat constant index are the same.
724 const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
725 if (!ConstIdx)
726 if (auto Splat = getSplatValue(*I))
727 ConstIdx = dyn_cast<ConstantInt>(Splat);
728 if (StructType *STy = GTI.getStructTypeOrNull()) {
729 // For structures the index is always splat or scalar constant
 730 assert(ConstIdx && "Unexpected GEP index");
731 uint64_t Field = ConstIdx->getZExtValue();
732 BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
733 } else {
734 int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
735 if (ConstIdx) {
736 BaseOffset +=
737 ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
738 } else {
739 // Needs scale register.
740 if (Scale != 0)
741 // No addressing mode takes two scale registers.
742 return TTI::TCC_Basic;
743 Scale = ElementSize;
744 }
745 }
746 }
747
748 // Assumes the address space is 0 when Ptr is nullptr.
749 unsigned AS =
750 (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
751
752 if (static_cast<T *>(this)->isLegalAddressingMode(
753 TargetType, const_cast<GlobalValue *>(BaseGV),
754 BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale, AS))
755 return TTI::TCC_Free;
756 return TTI::TCC_Basic;
757 }
758
759 using BaseT::getIntrinsicCost;
760
761 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
762 ArrayRef<const Value *> Arguments) {
763 // Delegate to the generic intrinsic handling code. This mostly provides an
764 // opportunity for targets to (for example) special case the cost of
765 // certain intrinsics based on constants used as arguments.
766 SmallVector<Type *, 8> ParamTys;
767 ParamTys.reserve(Arguments.size());
768 for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
769 ParamTys.push_back(Arguments[Idx]->getType());
770 return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
771 }
772
773 unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
774 if (isa<PHINode>(U))
3. Taking false branch
775 return TTI::TCC_Free; // Model all PHI nodes as free.
776
777 // Static alloca doesn't generate target instructions.
778 if (auto *A = dyn_cast<AllocaInst>(U))
4. Taking false branch
779 if (A->isStaticAlloca())
780 return TTI::TCC_Free;
781
782 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
5. Taking false branch
783 return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
784 GEP->getPointerOperand(),
785 Operands.drop_front());
786 }
787
788 if (auto CS = ImmutableCallSite(U)) {
6. Taking false branch
789 const Function *F = CS.getCalledFunction();
790 if (!F) {
791 // Just use the called value type.
792 Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
793 return static_cast<T *>(this)
794 ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
795 }
796
797 SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
798 return static_cast<T *>(this)->getCallCost(F, Arguments);
799 }
800
801 if (const CastInst *CI = dyn_cast<CastInst>(U)) {
7. Taking false branch
802 // Result of a cmp instruction is often extended (to be used by other
803 // cmp instructions, logical or return instructions). These are usually
804 // nop on most sane targets.
805 if (isa<CmpInst>(CI->getOperand(0)))
806 return TTI::TCC_Free;
807 if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
808 return static_cast<T *>(this)->getExtCost(CI, Operands.back());
809 }
810
811 return static_cast<T *>(this)->getOperationCost(
11. Calling 'BasicTTIImplBase::getOperationCost'
812 Operator::getOpcode(U), U->getType(),
813 U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
8. Assuming the condition is false
9. '?' condition is false
10. Passing null pointer value via 3rd parameter 'OpTy'
814 }
815
816 int getInstructionLatency(const Instruction *I) {
817 SmallVector<const Value *, 4> Operands(I->value_op_begin(),
818 I->value_op_end());
819 if (getUserCost(I, Operands) == TTI::TCC_Free)
820 return 0;
821
822 if (isa<LoadInst>(I))
823 return 4;
824
825 Type *DstTy = I->getType();
826
827 // Usually an intrinsic is a simple instruction.
828 // A real function call is much slower.
829 if (auto *CI = dyn_cast<CallInst>(I)) {
830 const Function *F = CI->getCalledFunction();
831 if (!F || static_cast<T *>(this)->isLoweredToCall(F))
832 return 40;
833 // Some intrinsics return a value and a flag, we use the value type
834 // to decide its latency.
835 if (StructType* StructTy = dyn_cast<StructType>(DstTy))
836 DstTy = StructTy->getElementType(0);
837 // Fall through to simple instructions.
838 }
839
840 if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
841 DstTy = VectorTy->getElementType();
842 if (DstTy->isFloatingPointTy())
843 return 3;
844
845 return 1;
846 }
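
The heuristic collapses to a small table, with the values taken from the code above: free user cost -> 0, load -> 4, real call -> 40, floating-point result -> 3, everything else -> 1. As a sketch (roughLatency is an illustrative name):

unsigned roughLatency(bool IsFree, bool IsLoad, bool IsRealCall, bool IsFPResult) {
  if (IsFree)     return 0;  // TCC_Free user cost
  if (IsLoad)     return 4;
  if (IsRealCall) return 40; // not lowered to a simple instruction
  return IsFPResult ? 3 : 1;
}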
847};
848}
849
850#endif

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/CodeGen/BasicTTIImpl.h

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// This file provides a helper that implements much of the TTI interface in
12/// terms of the target-independent code generator and TargetLowering
13/// interfaces.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
18#define LLVM_CODEGEN_BASICTTIIMPL_H
19
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/ArrayRef.h"
22#include "llvm/ADT/BitVector.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/Analysis/LoopInfo.h"
26#include "llvm/Analysis/TargetTransformInfo.h"
27#include "llvm/Analysis/TargetTransformInfoImpl.h"
28#include "llvm/CodeGen/ISDOpcodes.h"
29#include "llvm/CodeGen/TargetLowering.h"
30#include "llvm/CodeGen/TargetSubtargetInfo.h"
31#include "llvm/CodeGen/ValueTypes.h"
32#include "llvm/IR/BasicBlock.h"
33#include "llvm/IR/CallSite.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/InstrTypes.h"
39#include "llvm/IR/Instruction.h"
40#include "llvm/IR/Instructions.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Operator.h"
43#include "llvm/IR/Type.h"
44#include "llvm/IR/Value.h"
45#include "llvm/MC/MCSchedule.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/CommandLine.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MachineValueType.h"
50#include "llvm/Support/MathExtras.h"
51#include <algorithm>
52#include <cassert>
53#include <cstdint>
54#include <limits>
55#include <utility>
56
57namespace llvm {
58
59class Function;
60class GlobalValue;
61class LLVMContext;
62class ScalarEvolution;
63class SCEV;
64class TargetMachine;
65
66extern cl::opt<unsigned> PartialUnrollingThreshold;
67
68/// \brief Base class which can be used to help build a TTI implementation.
69///
70/// This class provides as much implementation of the TTI interface as is
71/// possible using the target independent parts of the code generator.
72///
73/// In order to subclass it, your class must implement a getST() method to
74/// return the subtarget, and a getTLI() method to return the target lowering.
75/// We need these methods implemented in the derived class so that this class
76/// doesn't have to duplicate storage for them.
77template <typename T>
78class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
79private:
80 using BaseT = TargetTransformInfoImplCRTPBase<T>;
81 using TTI = TargetTransformInfo;
82
83 /// Estimate a cost of shuffle as a sequence of extract and insert
84 /// operations.
85 unsigned getPermuteShuffleOverhead(Type *Ty) {
86    assert(Ty->isVectorTy() && "Can only shuffle vectors");
87 unsigned Cost = 0;
88 // Shuffle cost is equal to the cost of extracting element from its argument
89 // plus the cost of inserting them onto the result vector.
90
91 // e.g. <4 x float> has a mask of <0,5,2,7> i.e we need to extract from
92 // index 0 of first vector, index 1 of second vector,index 2 of first
93 // vector and finally index 3 of second vector and insert them at index
94 // <0,1,2,3> of result vector.
95 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
96 Cost += static_cast<T *>(this)
97 ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
98 Cost += static_cast<T *>(this)
99 ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
100 }
101 return Cost;
102 }
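
A quick worked instance of the comment above, as a standalone sketch with assumed unit insert/extract costs (the real numbers come from the target's getVectorInstrCost hook): permuting an n-element vector is modeled as n extracts plus n inserts, so the <4 x float> shuffle with mask <0,5,2,7> costs 8.

#include <cstdio>

unsigned permuteShuffleOverhead(unsigned NumElts, unsigned InsertCost = 1,
                                unsigned ExtractCost = 1) {
  unsigned Cost = 0;
  for (unsigned i = 0; i < NumElts; ++i)
    Cost += InsertCost + ExtractCost; // one extract + one insert per lane
  return Cost;
}

int main() {
  std::printf("<4 x float> permute cost = %u\n", permuteShuffleOverhead(4));
  return 0;
}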
103
104 /// \brief Local query method delegates up to T which *must* implement this!
105 const TargetSubtargetInfo *getST() const {
106 return static_cast<const T *>(this)->getST();
107 }
108
109 /// \brief Local query method delegates up to T which *must* implement this!
110 const TargetLoweringBase *getTLI() const {
111 return static_cast<const T *>(this)->getTLI();
112 }
113
114 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
115 switch (M) {
116 case TTI::MIM_Unindexed:
117 return ISD::UNINDEXED;
118 case TTI::MIM_PreInc:
119 return ISD::PRE_INC;
120 case TTI::MIM_PreDec:
121 return ISD::PRE_DEC;
122 case TTI::MIM_PostInc:
123 return ISD::POST_INC;
124 case TTI::MIM_PostDec:
125 return ISD::POST_DEC;
126 }
127    llvm_unreachable("Unexpected MemIndexedMode");
128 }
129
130protected:
131 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
132 : BaseT(DL) {}
133
134 using TargetTransformInfoImplBase::DL;
135
136public:
137 /// \name Scalar TTI Implementations
138 /// @{
139 bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
140 unsigned BitWidth, unsigned AddressSpace,
141 unsigned Alignment, bool *Fast) const {
142 EVT E = EVT::getIntegerVT(Context, BitWidth);
143 return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment, Fast);
144 }
145
146 bool hasBranchDivergence() { return false; }
147
148 bool isSourceOfDivergence(const Value *V) { return false; }
149
150 bool isAlwaysUniform(const Value *V) { return false; }
151
152 unsigned getFlatAddressSpace() {
153 // Return an invalid address space.
154 return -1;
155 }
156
157 bool isLegalAddImmediate(int64_t imm) {
158 return getTLI()->isLegalAddImmediate(imm);
159 }
160
161 bool isLegalICmpImmediate(int64_t imm) {
162 return getTLI()->isLegalICmpImmediate(imm);
163 }
164
165 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
166 bool HasBaseReg, int64_t Scale,
167 unsigned AddrSpace, Instruction *I = nullptr) {
168 TargetLoweringBase::AddrMode AM;
169 AM.BaseGV = BaseGV;
170 AM.BaseOffs = BaseOffset;
171 AM.HasBaseReg = HasBaseReg;
172 AM.Scale = Scale;
173 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
174 }
175
176 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
177 const DataLayout &DL) const {
178 EVT VT = getTLI()->getValueType(DL, Ty);
179 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
180 }
181
182 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
183 const DataLayout &DL) const {
184 EVT VT = getTLI()->getValueType(DL, Ty);
185 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
186 }
187
188 bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
189 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
190 }
191
192 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
193 bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
194 TargetLoweringBase::AddrMode AM;
195 AM.BaseGV = BaseGV;
196 AM.BaseOffs = BaseOffset;
197 AM.HasBaseReg = HasBaseReg;
198 AM.Scale = Scale;
199 return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
200 }
201
202 bool isTruncateFree(Type *Ty1, Type *Ty2) {
203 return getTLI()->isTruncateFree(Ty1, Ty2);
204 }
205
206 bool isProfitableToHoist(Instruction *I) {
207 return getTLI()->isProfitableToHoist(I);
208 }
209
210 bool useAA() const { return getST()->useAA(); }
211
212 bool isTypeLegal(Type *Ty) {
213 EVT VT = getTLI()->getValueType(DL, Ty);
214 return getTLI()->isTypeLegal(VT);
215 }
216
217 int getGEPCost(Type *PointeeType, const Value *Ptr,
218 ArrayRef<const Value *> Operands) {
219 return BaseT::getGEPCost(PointeeType, Ptr, Operands);
220 }
221
222 int getExtCost(const Instruction *I, const Value *Src) {
223 if (getTLI()->isExtFree(I))
224 return TargetTransformInfo::TCC_Free;
225
226 if (isa<ZExtInst>(I) || isa<SExtInst>(I))
227 if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
228 if (getTLI()->isExtLoad(LI, I, DL))
229 return TargetTransformInfo::TCC_Free;
230
231 return TargetTransformInfo::TCC_Basic;
232 }
233
234 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
235 ArrayRef<const Value *> Arguments) {
236 return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
237 }
238
239 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
240 ArrayRef<Type *> ParamTys) {
241 if (IID == Intrinsic::cttz) {
242 if (getTLI()->isCheapToSpeculateCttz())
243 return TargetTransformInfo::TCC_Basic;
244 return TargetTransformInfo::TCC_Expensive;
245 }
246
247 if (IID == Intrinsic::ctlz) {
248 if (getTLI()->isCheapToSpeculateCtlz())
249 return TargetTransformInfo::TCC_Basic;
250 return TargetTransformInfo::TCC_Expensive;
251 }
252
253 return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
254 }
255
256 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
257 unsigned &JumpTableSize) {
258    /// Try to find the estimated number of clusters. Note that the number of
259    /// clusters identified in this function could be different from the actual
260    /// numbers found in lowering. This function ignores switches that are
261    /// lowered with a mix of jump table / bit test / BTree. This function was
262    /// initially intended to be used when estimating the cost of a switch in
263    /// the inline cost heuristic, but it's a generic cost model to be used in
264    /// other places (e.g., in loop unrolling).
265 unsigned N = SI.getNumCases();
266 const TargetLoweringBase *TLI = getTLI();
267 const DataLayout &DL = this->getDataLayout();
268
269 JumpTableSize = 0;
270 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
271
272    // Early exit if neither a jump table nor a bit test is possible.
273 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
274 return N;
275
276 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
277 APInt MinCaseVal = MaxCaseVal;
278 for (auto CI : SI.cases()) {
279 const APInt &CaseVal = CI.getCaseValue()->getValue();
280 if (CaseVal.sgt(MaxCaseVal))
281 MaxCaseVal = CaseVal;
282 if (CaseVal.slt(MinCaseVal))
283 MinCaseVal = CaseVal;
284 }
285
286 // Check if suitable for a bit test
287 if (N <= DL.getIndexSizeInBits(0u)) {
288 SmallPtrSet<const BasicBlock *, 4> Dests;
289 for (auto I : SI.cases())
290 Dests.insert(I.getCaseSuccessor());
291
292 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
293 DL))
294 return 1;
295 }
296
297 // Check if suitable for a jump table.
298 if (IsJTAllowed) {
299 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
300 return N;
301 uint64_t Range =
302 (MaxCaseVal - MinCaseVal)
303 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
304 // Check whether a range of clusters is dense enough for a jump table
305 if (TLI->isSuitableForJumpTable(&SI, N, Range)) {
306 JumpTableSize = Range;
307 return 1;
308 }
309 }
310 return N;
311 }
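
The sketch below restates the clustering logic above as standalone C++ so the three outcomes are easy to see: one cluster for a bit test, one cluster (plus a JumpTableSize) for a dense jump table, otherwise N clusters. The word-size bit-test check and the 25% density threshold are simplifying assumptions standing in for TLI->isSuitableForBitTests and TLI->isSuitableForJumpTable.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

unsigned estimateClusters(const std::vector<int64_t> &Cases,
                          unsigned PointerBits, unsigned MinJTEntries,
                          uint64_t &JumpTableSize) {
  JumpTableSize = 0;
  unsigned N = Cases.size();
  if (N < 1)
    return N;
  int64_t MinV = *std::min_element(Cases.begin(), Cases.end());
  int64_t MaxV = *std::max_element(Cases.begin(), Cases.end());
  uint64_t Range = uint64_t(MaxV - MinV) + 1;
  // Bit test: few enough cases and a range that fits one machine word
  // (stand-in for isSuitableForBitTests).
  if (N <= PointerBits && Range <= PointerBits)
    return 1;
  // Jump table: enough cases and at least ~25% density (assumed threshold,
  // stand-in for isSuitableForJumpTable).
  if (N >= 2 && N >= MinJTEntries && Range <= 4 * uint64_t(N)) {
    JumpTableSize = Range;
    return 1;
  }
  return N;
}

int main() {
  uint64_t JTSize = 0;
  // Range 8 fits a 64-bit word: collapses to a single bit-test cluster.
  unsigned C1 = estimateClusters({0, 1, 2, 3, 7}, 64, 4, JTSize);
  // Range 37 exceeds 32 bits but is dense: a single jump-table cluster.
  unsigned C2 = estimateClusters({0, 4, 8, 12, 16, 20, 24, 28, 32, 36}, 32,
                                 4, JTSize);
  std::printf("bit test: %u clusters; jump table: %u clusters, size %llu\n",
              C1, C2, (unsigned long long)JTSize);
  return 0;
}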
312
313 unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }
314
315 unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
316
317 bool shouldBuildLookupTables() {
318 const TargetLoweringBase *TLI = getTLI();
319 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
320 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
321 }
322
323 bool haveFastSqrt(Type *Ty) {
324 const TargetLoweringBase *TLI = getTLI();
325 EVT VT = TLI->getValueType(DL, Ty);
326 return TLI->isTypeLegal(VT) &&
327 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
328 }
329
330 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
331 return true;
332 }
333
334 unsigned getFPOpCost(Type *Ty) {
335 // Check whether FADD is available, as a proxy for floating-point in
336 // general.
337 const TargetLoweringBase *TLI = getTLI();
338 EVT VT = TLI->getValueType(DL, Ty);
339 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
340 return TargetTransformInfo::TCC_Basic;
341 return TargetTransformInfo::TCC_Expensive;
342 }
343
344 unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
345 const TargetLoweringBase *TLI = getTLI();
346 switch (Opcode) {
12
Control jumps to the 'default' case at line 347
347 default: break;
13
Execution continues on line 358
348 case Instruction::Trunc:
349 if (TLI->isTruncateFree(OpTy, Ty))
350 return TargetTransformInfo::TCC_Free;
351 return TargetTransformInfo::TCC_Basic;
352 case Instruction::ZExt:
353 if (TLI->isZExtFree(OpTy, Ty))
354 return TargetTransformInfo::TCC_Free;
355 return TargetTransformInfo::TCC_Basic;
356 }
357
358 return BaseT::getOperationCost(Opcode, Ty, OpTy);
14
Passing null pointer value via 3rd parameter 'OpTy'
15
Calling 'TargetTransformInfoImplBase::getOperationCost'
359 }
360
361 unsigned getInliningThresholdMultiplier() { return 1; }
362
363 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
364 TTI::UnrollingPreferences &UP) {
365 // This unrolling functionality is target independent, but to provide some
366 // motivation for its intended use, for x86:
367
368 // According to the Intel 64 and IA-32 Architectures Optimization Reference
369 // Manual, Intel Core models and later have a loop stream detector (and
370 // associated uop queue) that can benefit from partial unrolling.
371 // The relevant requirements are:
372 // - The loop must have no more than 4 (8 for Nehalem and later) branches
373 // taken, and none of them may be calls.
374 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
375
376 // According to the Software Optimization Guide for AMD Family 15h
377 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
378 // and loop buffer which can benefit from partial unrolling.
379 // The relevant requirements are:
380 // - The loop must have fewer than 16 branches
381 // - The loop must have less than 40 uops in all executed loop branches
382
383 // The number of taken branches in a loop is hard to estimate here, and
384 // benchmarking has revealed that it is better not to be conservative when
385 // estimating the branch count. As a result, we'll ignore the branch limits
386 // until someone finds a case where it matters in practice.
387
388 unsigned MaxOps;
389 const TargetSubtargetInfo *ST = getST();
390 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
391 MaxOps = PartialUnrollingThreshold;
392 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
393 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
394 else
395 return;
396
397 // Scan the loop: don't unroll loops with calls.
398 for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
399 ++I) {
400 BasicBlock *BB = *I;
401
402 for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
403 if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
404 ImmutableCallSite CS(&*J);
405 if (const Function *F = CS.getCalledFunction()) {
406 if (!static_cast<T *>(this)->isLoweredToCall(F))
407 continue;
408 }
409
410 return;
411 }
412 }
413
414 // Enable runtime and partial unrolling up to the specified size.
415 // Enable using trip count upper bound to unroll loops.
416 UP.Partial = UP.Runtime = UP.UpperBound = true;
417 UP.PartialThreshold = MaxOps;
418
419 // Avoid unrolling when optimizing for size.
420 UP.OptSizeThreshold = 0;
421 UP.PartialOptSizeThreshold = 0;
422
423    // Set the number of instructions optimized when a "back edge"
424    // becomes a "fall through" to the default value of 2.
425 UP.BEInsns = 2;
426 }
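
Boiled down, the policy above is: pick MaxOps from the command-line flag or the subtarget's loop micro-op buffer, give up if the loop contains a real call, and otherwise enable partial/runtime unrolling with that threshold. A standalone sketch with hypothetical Fake* stand-ins for the loop structures (a nonzero FlagValue stands in for the flag's getNumOccurrences() check):

#include <cstdio>
#include <vector>

struct FakeInst { bool IsCall; bool LoweredToCall; };
using FakeBlock = std::vector<FakeInst>;

bool setUnrollThreshold(const std::vector<FakeBlock> &Loop,
                        unsigned FlagValue, unsigned LoopBufferSize,
                        unsigned &PartialThreshold) {
  unsigned MaxOps = FlagValue ? FlagValue : LoopBufferSize;
  if (MaxOps == 0)
    return false;                  // no information: leave defaults alone
  for (const FakeBlock &BB : Loop)
    for (const FakeInst &I : BB)
      if (I.IsCall && I.LoweredToCall)
        return false;              // don't unroll loops with real calls
  PartialThreshold = MaxOps;
  return true;
}

int main() {
  // One block: an ordinary instruction and an intrinsic that is not
  // lowered to a call, so unrolling stays enabled.
  std::vector<FakeBlock> Loop{{{false, false}, {true, false}}};
  unsigned Threshold = 0;
  if (setUnrollThreshold(Loop, 0, 28, Threshold))
    std::printf("partial unrolling up to %u uops\n", Threshold);
  return 0;
}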
427
428 int getInstructionLatency(const Instruction *I) {
429 if (isa<LoadInst>(I))
430 return getST()->getSchedModel().DefaultLoadLatency;
431
432 return BaseT::getInstructionLatency(I);
433 }
434
435 /// @}
436
437 /// \name Vector TTI Implementations
438 /// @{
439
440 unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
441
442 unsigned getRegisterBitWidth(bool Vector) const { return 32; }
443
444 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
445 /// are set if the result needs to be inserted and/or extracted from vectors.
446 unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
447    assert(Ty->isVectorTy() && "Can only scalarize vectors");
448 unsigned Cost = 0;
449
450 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
451 if (Insert)
452 Cost += static_cast<T *>(this)
453 ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
454 if (Extract)
455 Cost += static_cast<T *>(this)
456 ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
457 }
458
459 return Cost;
460 }
461
462 /// Estimate the overhead of scalarizing an instructions unique
463 /// non-constant operands. The types of the arguments are ordinarily
464 /// scalar, in which case the costs are multiplied with VF.
465 unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
466 unsigned VF) {
467 unsigned Cost = 0;
468 SmallPtrSet<const Value*, 4> UniqueOperands;
469 for (const Value *A : Args) {
470 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
471 Type *VecTy = nullptr;
472 if (A->getType()->isVectorTy()) {
473 VecTy = A->getType();
474 // If A is a vector operand, VF should be 1 or correspond to A.
475          assert((VF == 1 || VF == VecTy->getVectorNumElements()) &&
476                 "Vector argument does not match VF");
477 }
478 else
479 VecTy = VectorType::get(A->getType(), VF);
480
481 Cost += getScalarizationOverhead(VecTy, false, true);
482 }
483 }
484
485 return Cost;
486 }
487
488 unsigned getScalarizationOverhead(Type *VecTy, ArrayRef<const Value *> Args) {
489    assert(VecTy->isVectorTy());
490
491 unsigned Cost = 0;
492
493 Cost += getScalarizationOverhead(VecTy, true, false);
494 if (!Args.empty())
495 Cost += getOperandsScalarizationOverhead(Args,
496 VecTy->getVectorNumElements());
497 else
498 // When no information on arguments is provided, we add the cost
499 // associated with one argument as a heuristic.
500 Cost += getScalarizationOverhead(VecTy, false, true);
501
502 return Cost;
503 }
504
505 unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
506
507 unsigned getArithmeticInstrCost(
508 unsigned Opcode, Type *Ty,
509 TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
510 TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
511 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
512 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
513 ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
514 // Check if any of the operands are vector operands.
515 const TargetLoweringBase *TLI = getTLI();
516 int ISD = TLI->InstructionOpcodeToISD(Opcode);
517    assert(ISD && "Invalid opcode");
518
519 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
520
521 bool IsFloat = Ty->isFPOrFPVectorTy();
522 // Assume that floating point arithmetic operations cost twice as much as
523 // integer operations.
524 unsigned OpCost = (IsFloat ? 2 : 1);
525
526 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
527 // The operation is legal. Assume it costs 1.
528 // TODO: Once we have extract/insert subvector cost we need to use them.
529 return LT.first * OpCost;
530 }
531
532 if (!TLI->isOperationExpand(ISD, LT.second)) {
533 // If the operation is custom lowered, then assume that the code is twice
534 // as expensive.
535 return LT.first * 2 * OpCost;
536 }
537
538 // Else, assume that we need to scalarize this op.
540    // TODO: If one of the types gets legalized by splitting, handle this
540 // similarly to what getCastInstrCost() does.
541 if (Ty->isVectorTy()) {
542 unsigned Num = Ty->getVectorNumElements();
543 unsigned Cost = static_cast<T *>(this)
544 ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
545      // Return the cost of multiple scalar invocations plus the cost of
546 // inserting and extracting the values.
547 return getScalarizationOverhead(Ty, Args) + Num * Cost;
548 }
549
550 // We don't know anything about this scalar instruction.
551 return OpCost;
552 }
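
The three tiers above reduce to simple arithmetic once the legalization action and costs are fixed. A standalone sketch with assumed numbers (LTFirst plays the role of LT.first; ScalarizationOverhead stands in for getScalarizationOverhead(Ty, Args)):

#include <cstdio>

enum class Action { Legal, Custom, Expand };

unsigned arithmeticCost(Action A, unsigned LTFirst, bool IsFloat,
                        unsigned NumElts, unsigned ScalarizationOverhead) {
  unsigned OpCost = IsFloat ? 2 : 1; // FP assumed twice as expensive
  if (A == Action::Legal)
    return LTFirst * OpCost;
  if (A == Action::Custom)
    return LTFirst * 2 * OpCost;     // custom lowering: twice as expensive
  // Expand: scalarize into NumElts scalar ops plus insert/extract overhead.
  return ScalarizationOverhead + NumElts * OpCost;
}

int main() {
  // An illegal <4 x float> fadd: 4 scalar fadds at cost 2 plus overhead 8.
  std::printf("cost = %u\n", arithmeticCost(Action::Expand, 1, true, 4, 8));
  return 0;
}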
553
554 unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
555 Type *SubTp) {
556 if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
557 Kind == TTI::SK_PermuteSingleSrc) {
558 return getPermuteShuffleOverhead(Tp);
559 }
560 return 1;
561 }
562
563 unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
564 const Instruction *I = nullptr) {
565 const TargetLoweringBase *TLI = getTLI();
566 int ISD = TLI->InstructionOpcodeToISD(Opcode);
567    assert(ISD && "Invalid opcode");
568 std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
569 std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);
570
571 // Check for NOOP conversions.
572 if (SrcLT.first == DstLT.first &&
573 SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
574
575      // Bitcasts between types that are legalized to the same type are free.
576 if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
577 return 0;
578 }
579
580 if (Opcode == Instruction::Trunc &&
581 TLI->isTruncateFree(SrcLT.second, DstLT.second))
582 return 0;
583
584 if (Opcode == Instruction::ZExt &&
585 TLI->isZExtFree(SrcLT.second, DstLT.second))
586 return 0;
587
588 if (Opcode == Instruction::AddrSpaceCast &&
589 TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
590 Dst->getPointerAddressSpace()))
591 return 0;
592
593 // If this is a zext/sext of a load, return 0 if the corresponding
594 // extending load exists on target.
595 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
596 I && isa<LoadInst>(I->getOperand(0))) {
597 EVT ExtVT = EVT::getEVT(Dst);
598 EVT LoadVT = EVT::getEVT(Src);
599 unsigned LType =
600 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
601 if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
602 return 0;
603 }
604
605 // If the cast is marked as legal (or promote) then assume low cost.
606 if (SrcLT.first == DstLT.first &&
607 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
608 return 1;
609
610 // Handle scalar conversions.
611 if (!Src->isVectorTy() && !Dst->isVectorTy()) {
612 // Scalar bitcasts are usually free.
613 if (Opcode == Instruction::BitCast)
614 return 0;
615
616 // Just check the op cost. If the operation is legal then assume it costs
617 // 1.
618 if (!TLI->isOperationExpand(ISD, DstLT.second))
619 return 1;
620
621      // Assume that illegal scalar instructions are expensive.
622 return 4;
623 }
624
625 // Check vector-to-vector casts.
626 if (Dst->isVectorTy() && Src->isVectorTy()) {
627 // If the cast is between same-sized registers, then the check is simple.
628 if (SrcLT.first == DstLT.first &&
629 SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
630
631 // Assume that Zext is done using AND.
632 if (Opcode == Instruction::ZExt)
633 return 1;
634
635 // Assume that sext is done using SHL and SRA.
636 if (Opcode == Instruction::SExt)
637 return 2;
638
639        // Just check the op cost. If the operation is legal then assume it
640        // costs 1 and multiply by the type-legalization overhead.
641
642 if (!TLI->isOperationExpand(ISD, DstLT.second))
643 return SrcLT.first * 1;
644 }
645
646 // If we are legalizing by splitting, query the concrete TTI for the cost
647 // of casting the original vector twice. We also need to factor in the
648 // cost of the split itself. Count that as 1, to be consistent with
649 // TLI->getTypeLegalizationCost().
650 if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
651 TargetLowering::TypeSplitVector) ||
652 (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
653 TargetLowering::TypeSplitVector)) {
654 Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
655 Dst->getVectorNumElements() / 2);
656 Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
657 Src->getVectorNumElements() / 2);
658 T *TTI = static_cast<T *>(this);
659 return TTI->getVectorSplitCost() +
660 (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc, I));
661 }
662
663 // In other cases where the source or destination are illegal, assume
664 // the operation will get scalarized.
665 unsigned Num = Dst->getVectorNumElements();
666 unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
667 Opcode, Dst->getScalarType(), Src->getScalarType(), I);
668
669      // Return the cost of multiple scalar invocations plus the cost of
670 // inserting and extracting the values.
671 return getScalarizationOverhead(Dst, true, true) + Num * Cost;
672 }
673
674    // We already handled vector-to-vector and scalar-to-scalar conversions.
675    // This is where we handle bitcasts between vectors and scalars. We
676    // need to assume that the conversion is scalarized in one way or
677    // another.
678 if (Opcode == Instruction::BitCast)
679 // Illegal bitcasts are done by storing and loading from a stack slot.
680 return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
681 : 0) +
682 (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
683 : 0);
684
685    llvm_unreachable("Unhandled cast");
686 }
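
For the split-vector branch above, the recursion is worth seeing in isolation: each split costs getVectorSplitCost() (1 in this base class, line 1359) plus two casts of the half-width type. A standalone sketch assuming every legal cast costs 1:

#include <cstdio>

unsigned castCost(unsigned NumElts, unsigned MaxLegalElts) {
  if (NumElts <= MaxLegalElts)
    return 1;                          // a legal cast assumed to cost 1
  // One split (assumed cost 1) plus two casts of the half-width vectors.
  return 1 + 2 * castCost(NumElts / 2, MaxLegalElts);
}

int main() {
  // A <16 x t> cast on a machine whose widest legal vector holds 4
  // elements: 16 -> two 8s -> four 4s, so 1 + 2*(1 + 2*1) = 7.
  std::printf("cost = %u\n", castCost(16, 4));
  return 0;
}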
687
688 unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
689 VectorType *VecTy, unsigned Index) {
690 return static_cast<T *>(this)->getVectorInstrCost(
691 Instruction::ExtractElement, VecTy, Index) +
692 static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
693 VecTy->getElementType());
694 }
695
696 unsigned getCFInstrCost(unsigned Opcode) {
697 // Branches are assumed to be predicted.
698 return 0;
699 }
700
701 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
702 const Instruction *I) {
703 const TargetLoweringBase *TLI = getTLI();
704 int ISD = TLI->InstructionOpcodeToISD(Opcode);
705    assert(ISD && "Invalid opcode");
706
707 // Selects on vectors are actually vector selects.
708 if (ISD == ISD::SELECT) {
709      assert(CondTy && "CondTy must exist");
710 if (CondTy->isVectorTy())
711 ISD = ISD::VSELECT;
712 }
713 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
714
715 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
716 !TLI->isOperationExpand(ISD, LT.second)) {
717 // The operation is legal. Assume it costs 1. Multiply
718 // by the type-legalization overhead.
719 return LT.first * 1;
720 }
721
722    // Otherwise, assume that the operation is scalarized.
723    // TODO: If one of the types gets legalized by splitting, handle this
724 // similarly to what getCastInstrCost() does.
725 if (ValTy->isVectorTy()) {
726 unsigned Num = ValTy->getVectorNumElements();
727 if (CondTy)
728 CondTy = CondTy->getScalarType();
729 unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
730 Opcode, ValTy->getScalarType(), CondTy, I);
731
732      // Return the cost of multiple scalar invocations plus the cost of
733 // inserting and extracting the values.
734 return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
735 }
736
737 // Unknown scalar opcode.
738 return 1;
739 }
740
741 unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
742 std::pair<unsigned, MVT> LT =
743 getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
744
745 return LT.first;
746 }
747
748 unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
749 unsigned AddressSpace, const Instruction *I = nullptr) {
750    assert(!Src->isVoidTy() && "Invalid type");
751 std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);
752
753 // Assuming that all loads of legal types cost 1.
754 unsigned Cost = LT.first;
755
756 if (Src->isVectorTy() &&
757 Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
758 // This is a vector load that legalizes to a larger type than the vector
759 // itself. Unless the corresponding extending load or truncating store is
760 // legal, then this will scalarize.
761 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
762 EVT MemVT = getTLI()->getValueType(DL, Src);
763 if (Opcode == Instruction::Store)
764 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
765 else
766 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
767
768 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
769 // This is a vector load/store for some illegal type that is scalarized.
770 // We must account for the cost of building or decomposing the vector.
771 Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
772 Opcode == Instruction::Store);
773 }
774 }
775
776 return Cost;
777 }
778
779 unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
780 unsigned Factor,
781 ArrayRef<unsigned> Indices,
782 unsigned Alignment,
783 unsigned AddressSpace) {
784 VectorType *VT = dyn_cast<VectorType>(VecTy);
785    assert(VT && "Expect a vector type for interleaved memory op");
786
787 unsigned NumElts = VT->getNumElements();
788    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
789
790 unsigned NumSubElts = NumElts / Factor;
791 VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);
792
793 // Firstly, the cost of load/store operation.
794 unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
795 Opcode, VecTy, Alignment, AddressSpace);
796
797 // Legalize the vector type, and get the legalized and unlegalized type
798 // sizes.
799 MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
800 unsigned VecTySize =
801 static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
802 unsigned VecTyLTSize = VecTyLT.getStoreSize();
803
804 // Return the ceiling of dividing A by B.
805 auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
806
807 // Scale the cost of the memory operation by the fraction of legalized
808 // instructions that will actually be used. We shouldn't account for the
809 // cost of dead instructions since they will be removed.
810 //
811 // E.g., An interleaved load of factor 8:
812 // %vec = load <16 x i64>, <16 x i64>* %ptr
813 // %v0 = shufflevector %vec, undef, <0, 8>
814 //
815 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
816 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
817 // type). The other loads are unused.
818 //
819 // We only scale the cost of loads since interleaved store groups aren't
820 // allowed to have gaps.
821 if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
822 // The number of loads of a legal type it will take to represent a load
823 // of the unlegalized vector type.
824 unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);
825
826 // The number of elements of the unlegalized type that correspond to a
827 // single legal instruction.
828 unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);
829
830 // Determine which legal instructions will be used.
831 BitVector UsedInsts(NumLegalInsts, false);
832 for (unsigned Index : Indices)
833 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
834 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
835
836 // Scale the cost of the load by the fraction of legal instructions that
837 // will be used.
838 Cost *= UsedInsts.count() / NumLegalInsts;
839 }
840
841    // Then add the cost of the interleave operation.
842 if (Opcode == Instruction::Load) {
843      // The interleave cost is similar to extracting the sub-vectors' elements
844      // from the wide vector and inserting them into the sub-vectors.
845 //
846 // E.g. An interleaved load of factor 2 (with one member of index 0):
847 // %vec = load <8 x i32>, <8 x i32>* %ptr
848 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
849 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
850 // <8 x i32> vector and insert them into a <4 x i32> vector.
851
852      assert(Indices.size() <= Factor &&
853             "Interleaved memory op has too many members");
854
855 for (unsigned Index : Indices) {
856        assert(Index < Factor && "Invalid index for interleaved memory op");
857
858 // Extract elements from loaded vector for each sub vector.
859 for (unsigned i = 0; i < NumSubElts; i++)
860 Cost += static_cast<T *>(this)->getVectorInstrCost(
861 Instruction::ExtractElement, VT, Index + i * Factor);
862 }
863
864 unsigned InsSubCost = 0;
865 for (unsigned i = 0; i < NumSubElts; i++)
866 InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
867 Instruction::InsertElement, SubVT, i);
868
869 Cost += Indices.size() * InsSubCost;
870 } else {
871      // The interleave cost is extracting all elements from the sub-vectors
872      // and inserting them into the wide vector.
873 //
874 // E.g. An interleaved store of factor 2:
875 // %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
876 // store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
877 // The cost is estimated as extract all elements from both <4 x i32>
878 // vectors and insert into the <8 x i32> vector.
879
880 unsigned ExtSubCost = 0;
881 for (unsigned i = 0; i < NumSubElts; i++)
882 ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
883 Instruction::ExtractElement, SubVT, i);
884 Cost += ExtSubCost * Factor;
885
886 for (unsigned i = 0; i < NumElts; i++)
887 Cost += static_cast<T *>(this)
888 ->getVectorInstrCost(Instruction::InsertElement, VT, i);
889 }
890
891 return Cost;
892 }
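
The factor-8 example in the comments can be checked numerically. A standalone sketch of just the used-loads computation, mirroring lines 821-838 (ceiling division and unit memory-op cost assumed): a <16 x i64> load legalized to eight v2i64 loads, with a single member at index 0, touches only two of the eight legal loads.

#include <cstdio>
#include <set>

int main() {
  unsigned NumElts = 16, Factor = 8, Index = 0;
  unsigned NumSubElts = NumElts / Factor;  // 2 elements per member
  unsigned NumLegalInsts = 8;              // <16 x i64> -> 8 x v2i64
  unsigned NumEltsPerLegalInst = (NumElts + NumLegalInsts - 1) / NumLegalInsts;

  std::set<unsigned> UsedInsts;
  for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
    UsedInsts.insert((Index + Elt * Factor) / NumEltsPerLegalInst);

  // Elements 0 and 8 land in legal loads 0 and 4: 2 of 8 loads are live.
  std::printf("used %zu of %u legal loads\n", UsedInsts.size(),
              NumLegalInsts);
  return 0;
}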
893
894 /// Get intrinsic cost based on arguments.
895 unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
896 ArrayRef<Value *> Args, FastMathFlags FMF,
897 unsigned VF = 1) {
898 unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
899    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
900
901 switch (IID) {
902 default: {
903 // Assume that we need to scalarize this intrinsic.
904 SmallVector<Type *, 4> Types;
905 for (Value *Op : Args) {
906 Type *OpTy = Op->getType();
907        assert(VF == 1 || !OpTy->isVectorTy());
908 Types.push_back(VF == 1 ? OpTy : VectorType::get(OpTy, VF));
909 }
910
911 if (VF > 1 && !RetTy->isVoidTy())
912 RetTy = VectorType::get(RetTy, VF);
913
914 // Compute the scalarization overhead based on Args for a vector
915 // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
916 // CostModel will pass a vector RetTy and VF is 1.
917 unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
918 if (RetVF > 1 || VF > 1) {
919 ScalarizationCost = 0;
920 if (!RetTy->isVoidTy())
921 ScalarizationCost += getScalarizationOverhead(RetTy, true, false);
922 ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
923 }
924
925 return static_cast<T *>(this)->
926 getIntrinsicInstrCost(IID, RetTy, Types, FMF, ScalarizationCost);
927 }
928 case Intrinsic::masked_scatter: {
929      assert(VF == 1 && "Can't vectorize types here.");
930 Value *Mask = Args[3];
931 bool VarMask = !isa<Constant>(Mask);
932 unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
933 return
934 static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Store,
935 Args[0]->getType(),
936 Args[1], VarMask,
937 Alignment);
938 }
939 case Intrinsic::masked_gather: {
940      assert(VF == 1 && "Can't vectorize types here.");
941 Value *Mask = Args[2];
942 bool VarMask = !isa<Constant>(Mask);
943 unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
944 return
945 static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Load,
946 RetTy, Args[0], VarMask,
947 Alignment);
948 }
949 case Intrinsic::experimental_vector_reduce_add:
950 case Intrinsic::experimental_vector_reduce_mul:
951 case Intrinsic::experimental_vector_reduce_and:
952 case Intrinsic::experimental_vector_reduce_or:
953 case Intrinsic::experimental_vector_reduce_xor:
954 case Intrinsic::experimental_vector_reduce_fadd:
955 case Intrinsic::experimental_vector_reduce_fmul:
956 case Intrinsic::experimental_vector_reduce_smax:
957 case Intrinsic::experimental_vector_reduce_smin:
958 case Intrinsic::experimental_vector_reduce_fmax:
959 case Intrinsic::experimental_vector_reduce_fmin:
960 case Intrinsic::experimental_vector_reduce_umax:
961 case Intrinsic::experimental_vector_reduce_umin:
962 return getIntrinsicInstrCost(IID, RetTy, Args[0]->getType(), FMF);
963 }
964 }
965
966 /// Get intrinsic cost based on argument types.
967 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
968 /// cost of scalarizing the arguments and the return value will be computed
969 /// based on types.
970 unsigned getIntrinsicInstrCost(
971 Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF,
972 unsigned ScalarizationCostPassed = std::numeric_limits<unsigned>::max()) {
973 SmallVector<unsigned, 2> ISDs;
974 unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
975 switch (IID) {
976 default: {
977 // Assume that we need to scalarize this intrinsic.
978 unsigned ScalarizationCost = ScalarizationCostPassed;
979 unsigned ScalarCalls = 1;
980 Type *ScalarRetTy = RetTy;
981 if (RetTy->isVectorTy()) {
982 if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
983 ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
984 ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
985 ScalarRetTy = RetTy->getScalarType();
986 }
987 SmallVector<Type *, 4> ScalarTys;
988 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
989 Type *Ty = Tys[i];
990 if (Ty->isVectorTy()) {
991 if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
992 ScalarizationCost += getScalarizationOverhead(Ty, false, true);
993 ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
994 Ty = Ty->getScalarType();
995 }
996 ScalarTys.push_back(Ty);
997 }
998 if (ScalarCalls == 1)
999 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
1000
1001 unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
1002 IID, ScalarRetTy, ScalarTys, FMF);
1003
1004 return ScalarCalls * ScalarCost + ScalarizationCost;
1005 }
1006 // Look for intrinsics that can be lowered directly or turned into a scalar
1007 // intrinsic call.
1008 case Intrinsic::sqrt:
1009 ISDs.push_back(ISD::FSQRT);
1010 break;
1011 case Intrinsic::sin:
1012 ISDs.push_back(ISD::FSIN);
1013 break;
1014 case Intrinsic::cos:
1015 ISDs.push_back(ISD::FCOS);
1016 break;
1017 case Intrinsic::exp:
1018 ISDs.push_back(ISD::FEXP);
1019 break;
1020 case Intrinsic::exp2:
1021 ISDs.push_back(ISD::FEXP2);
1022 break;
1023 case Intrinsic::log:
1024 ISDs.push_back(ISD::FLOG);
1025 break;
1026 case Intrinsic::log10:
1027 ISDs.push_back(ISD::FLOG10);
1028 break;
1029 case Intrinsic::log2:
1030 ISDs.push_back(ISD::FLOG2);
1031 break;
1032 case Intrinsic::fabs:
1033 ISDs.push_back(ISD::FABS);
1034 break;
1035 case Intrinsic::minnum:
1036 ISDs.push_back(ISD::FMINNUM);
1037 if (FMF.noNaNs())
1038 ISDs.push_back(ISD::FMINNAN);
1039 break;
1040 case Intrinsic::maxnum:
1041 ISDs.push_back(ISD::FMAXNUM);
1042 if (FMF.noNaNs())
1043 ISDs.push_back(ISD::FMAXNAN);
1044 break;
1045 case Intrinsic::copysign:
1046 ISDs.push_back(ISD::FCOPYSIGN);
1047 break;
1048 case Intrinsic::floor:
1049 ISDs.push_back(ISD::FFLOOR);
1050 break;
1051 case Intrinsic::ceil:
1052 ISDs.push_back(ISD::FCEIL);
1053 break;
1054 case Intrinsic::trunc:
1055 ISDs.push_back(ISD::FTRUNC);
1056 break;
1057 case Intrinsic::nearbyint:
1058 ISDs.push_back(ISD::FNEARBYINT);
1059 break;
1060 case Intrinsic::rint:
1061 ISDs.push_back(ISD::FRINT);
1062 break;
1063 case Intrinsic::round:
1064 ISDs.push_back(ISD::FROUND);
1065 break;
1066 case Intrinsic::pow:
1067 ISDs.push_back(ISD::FPOW);
1068 break;
1069 case Intrinsic::fma:
1070 ISDs.push_back(ISD::FMA);
1071 break;
1072 case Intrinsic::fmuladd:
1073 ISDs.push_back(ISD::FMA);
1074 break;
1075 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
1076 case Intrinsic::lifetime_start:
1077 case Intrinsic::lifetime_end:
1078 case Intrinsic::sideeffect:
1079 return 0;
1080 case Intrinsic::masked_store:
1081 return static_cast<T *>(this)
1082 ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
1083 case Intrinsic::masked_load:
1084 return static_cast<T *>(this)
1085 ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
1086 case Intrinsic::experimental_vector_reduce_add:
1087 return static_cast<T *>(this)->getArithmeticReductionCost(
1088 Instruction::Add, Tys[0], /*IsPairwiseForm=*/false);
1089 case Intrinsic::experimental_vector_reduce_mul:
1090 return static_cast<T *>(this)->getArithmeticReductionCost(
1091 Instruction::Mul, Tys[0], /*IsPairwiseForm=*/false);
1092 case Intrinsic::experimental_vector_reduce_and:
1093 return static_cast<T *>(this)->getArithmeticReductionCost(
1094 Instruction::And, Tys[0], /*IsPairwiseForm=*/false);
1095 case Intrinsic::experimental_vector_reduce_or:
1096 return static_cast<T *>(this)->getArithmeticReductionCost(
1097 Instruction::Or, Tys[0], /*IsPairwiseForm=*/false);
1098 case Intrinsic::experimental_vector_reduce_xor:
1099 return static_cast<T *>(this)->getArithmeticReductionCost(
1100 Instruction::Xor, Tys[0], /*IsPairwiseForm=*/false);
1101 case Intrinsic::experimental_vector_reduce_fadd:
1102 return static_cast<T *>(this)->getArithmeticReductionCost(
1103 Instruction::FAdd, Tys[0], /*IsPairwiseForm=*/false);
1104 case Intrinsic::experimental_vector_reduce_fmul:
1105 return static_cast<T *>(this)->getArithmeticReductionCost(
1106 Instruction::FMul, Tys[0], /*IsPairwiseForm=*/false);
1107 case Intrinsic::experimental_vector_reduce_smax:
1108 case Intrinsic::experimental_vector_reduce_smin:
1109 case Intrinsic::experimental_vector_reduce_fmax:
1110 case Intrinsic::experimental_vector_reduce_fmin:
1111 return static_cast<T *>(this)->getMinMaxReductionCost(
1112 Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
1113 /*IsSigned=*/true);
1114 case Intrinsic::experimental_vector_reduce_umax:
1115 case Intrinsic::experimental_vector_reduce_umin:
1116 return static_cast<T *>(this)->getMinMaxReductionCost(
1117 Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
1118 /*IsSigned=*/false);
1119 case Intrinsic::ctpop:
1120 ISDs.push_back(ISD::CTPOP);
1121 // In case of legalization use TCC_Expensive. This is cheaper than a
1122 // library call but still not a cheap instruction.
1123 SingleCallCost = TargetTransformInfo::TCC_Expensive;
1124 break;
1125 // FIXME: ctlz, cttz, ...
1126 }
1127
1128 const TargetLoweringBase *TLI = getTLI();
1129 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
1130
1131 SmallVector<unsigned, 2> LegalCost;
1132 SmallVector<unsigned, 2> CustomCost;
1133 for (unsigned ISD : ISDs) {
1134 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1135 if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
1136 return 0;
1137 }
1138
1139 // The operation is legal. Assume it costs 1.
1140 // If the type is split to multiple registers, assume that there is some
1141 // overhead to this.
1142 // TODO: Once we have extract/insert subvector cost we need to use them.
1143 if (LT.first > 1)
1144 LegalCost.push_back(LT.first * 2);
1145 else
1146 LegalCost.push_back(LT.first * 1);
1147 } else if (!TLI->isOperationExpand(ISD, LT.second)) {
1148 // If the operation is custom lowered then assume
1149 // that the code is twice as expensive.
1150 CustomCost.push_back(LT.first * 2);
1151 }
1152 }
1153
1154 auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
1155 if (MinLegalCostI != LegalCost.end())
1156 return *MinLegalCostI;
1157
1158 auto MinCustomCostI = std::min_element(CustomCost.begin(), CustomCost.end());
1159 if (MinCustomCostI != CustomCost.end())
1160 return *MinCustomCostI;
1161
1162 // If we can't lower fmuladd into an FMA estimate the cost as a floating
1163 // point mul followed by an add.
1164 if (IID == Intrinsic::fmuladd)
1165 return static_cast<T *>(this)
1166 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
1167 static_cast<T *>(this)
1168 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
1169
1170 // Else, assume that we need to scalarize this intrinsic. For math builtins
1171 // this will emit a costly libcall, adding call overhead and spills. Make it
1172 // very expensive.
1173 if (RetTy->isVectorTy()) {
1174 unsigned ScalarizationCost =
1175 ((ScalarizationCostPassed != std::numeric_limits<unsigned>::max())
1176 ? ScalarizationCostPassed
1177 : getScalarizationOverhead(RetTy, true, false));
1178 unsigned ScalarCalls = RetTy->getVectorNumElements();
1179 SmallVector<Type *, 4> ScalarTys;
1180 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1181 Type *Ty = Tys[i];
1182 if (Ty->isVectorTy())
1183 Ty = Ty->getScalarType();
1184 ScalarTys.push_back(Ty);
1185 }
1186 unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
1187 IID, RetTy->getScalarType(), ScalarTys, FMF);
1188 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1189 if (Tys[i]->isVectorTy()) {
1190 if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
1191 ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
1192 ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
1193 }
1194 }
1195
1196 return ScalarCalls * ScalarCost + ScalarizationCost;
1197 }
1198
1199 // This is going to be turned into a library call, make it expensive.
1200 return SingleCallCost;
1201 }
1202
1203 /// \brief Compute a cost of the given call instruction.
1204 ///
1205 /// Compute the cost of calling function F with return type RetTy and
1206 /// argument types Tys. F might be nullptr, in this case the cost of an
1207 /// arbitrary call with the specified signature will be returned.
1208 /// This is used, for instance, when we estimate call of a vector
1209 /// counterpart of the given function.
1210 /// \param F Called function, might be nullptr.
1211 /// \param RetTy Return value types.
1212 /// \param Tys Argument types.
1213 /// \returns The cost of Call instruction.
1214 unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
1215 return 10;
1216 }
1217
1218 unsigned getNumberOfParts(Type *Tp) {
1219 std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
1220 return LT.first;
1221 }
1222
1223 unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,
1224 const SCEV *) {
1225 return 0;
1226 }
1227
1228 /// Try to calculate arithmetic and shuffle op costs for reduction operations.
1229  /// We're assuming that reduction operations are performed in the following way:
1230 /// 1. Non-pairwise reduction
1231 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
1232 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
1233 /// \----------------v-------------/ \----------v------------/
1234 /// n/2 elements n/2 elements
1235  /// %red1 = op <n x t> %val, <n x t> %val1
1236 /// After this operation we have a vector %red1 where only the first n/2
1237 /// elements are meaningful, the second n/2 elements are undefined and can be
1238 /// dropped. All other operations are actually working with the vector of
1239 /// length n/2, not n, though the real vector length is still n.
1240 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
1241 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
1242 /// \----------------v-------------/ \----------v------------/
1243 /// n/4 elements 3*n/4 elements
1244  /// %red2 = op <n x t> %red1, <n x t> %val2 - working with the vector of
1245 /// length n/2, the resulting vector has length n/4 etc.
1246 /// 2. Pairwise reduction:
1247 /// Everything is the same except for an additional shuffle operation which
1248 /// is used to produce operands for pairwise kind of reductions.
1249 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
1250 /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
1251 /// \-------------v----------/ \----------v------------/
1252 /// n/2 elements n/2 elements
1253 /// %val2 = shufflevector<n x t> %val, <n x t> %undef,
1254 /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
1255 /// \-------------v----------/ \----------v------------/
1256 /// n/2 elements n/2 elements
1257  /// %red1 = op <n x t> %val1, <n x t> %val2
1258 /// Again, the operation is performed on <n x t> vector, but the resulting
1259 /// vector %red1 is <n/2 x t> vector.
1260 ///
1261 /// The cost model should take into account that the actual length of the
1262 /// vector is reduced on each iteration.
1263 unsigned getArithmeticReductionCost(unsigned Opcode, Type *Ty,
1264 bool IsPairwise) {
1265    assert(Ty->isVectorTy() && "Expect a vector type");
1266 Type *ScalarTy = Ty->getVectorElementType();
1267 unsigned NumVecElts = Ty->getVectorNumElements();
1268 unsigned NumReduxLevels = Log2_32(NumVecElts);
1269 unsigned ArithCost = 0;
1270 unsigned ShuffleCost = 0;
1271 auto *ConcreteTTI = static_cast<T *>(this);
1272 std::pair<unsigned, MVT> LT =
1273 ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
1274 unsigned LongVectorCount = 0;
1275 unsigned MVTLen =
1276 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
1277 while (NumVecElts > MVTLen) {
1278 NumVecElts /= 2;
1279 // Assume the pairwise shuffles add a cost.
1280 ShuffleCost += (IsPairwise + 1) *
1281 ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
1282 NumVecElts, Ty);
1283 ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
1284 Ty = VectorType::get(ScalarTy, NumVecElts);
1285 ++LongVectorCount;
1286 }
1287 // The minimal length of the vector is limited by the real length of vector
1288 // operations performed on the current platform. That's why several final
1289 // reduction operations are performed on the vectors with the same
1290 // architecture-dependent length.
1291 ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
1292 ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
1293 NumVecElts, Ty);
1294 ArithCost += (NumReduxLevels - LongVectorCount) *
1295 ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
1296 return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
1297 }
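
Plugging unit shuffle and arithmetic costs into the loop above makes the halving schedule concrete: an <8 x t> reduction on a target whose widest legal vector holds 4 elements does one step at full width and log2(8) - 1 = 2 more at the fixed legal width. A standalone sketch (unit costs assumed, final extract overhead omitted):

#include <cstdio>

unsigned log2u(unsigned N) { unsigned L = 0; while (N >>= 1) ++L; return L; }

unsigned reductionCost(unsigned NumVecElts, unsigned MVTLen, bool IsPairwise) {
  unsigned NumReduxLevels = log2u(NumVecElts);
  unsigned ShuffleCost = 0, ArithCost = 0, LongVectorCount = 0;
  while (NumVecElts > MVTLen) {
    NumVecElts /= 2;
    ShuffleCost += (IsPairwise + 1) * 1; // unit shuffle cost assumed
    ArithCost += 1;                      // unit op cost assumed
    ++LongVectorCount;
  }
  // Remaining levels run at the architecture-dependent legal width.
  ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1);
  ArithCost += NumReduxLevels - LongVectorCount;
  return ShuffleCost + ArithCost;
}

int main() {
  std::printf("non-pairwise: %u, pairwise: %u\n",
              reductionCost(8, 4, false), reductionCost(8, 4, true));
  return 0;
}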
1298
1299 /// Try to calculate op costs for min/max reduction operations.
1300 /// \param CondTy Conditional type for the Select instruction.
1301 unsigned getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwise,
1302 bool) {
1303    assert(Ty->isVectorTy() && "Expect a vector type");
1304 Type *ScalarTy = Ty->getVectorElementType();
1305 Type *ScalarCondTy = CondTy->getVectorElementType();
1306 unsigned NumVecElts = Ty->getVectorNumElements();
1307 unsigned NumReduxLevels = Log2_32(NumVecElts);
1308 unsigned CmpOpcode;
1309 if (Ty->isFPOrFPVectorTy()) {
1310 CmpOpcode = Instruction::FCmp;
1311 } else {
1312      assert(Ty->isIntOrIntVectorTy() &&
1313             "expecting floating point or integer type for min/max reduction");
1314 CmpOpcode = Instruction::ICmp;
1315 }
1316 unsigned MinMaxCost = 0;
1317 unsigned ShuffleCost = 0;
1318 auto *ConcreteTTI = static_cast<T *>(this);
1319 std::pair<unsigned, MVT> LT =
1320 ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
1321 unsigned LongVectorCount = 0;
1322 unsigned MVTLen =
1323 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
1324 while (NumVecElts > MVTLen) {
1325 NumVecElts /= 2;
1326 // Assume the pairwise shuffles add a cost.
1327 ShuffleCost += (IsPairwise + 1) *
1328 ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
1329 NumVecElts, Ty);
1330 MinMaxCost +=
1331 ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
1332 ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
1333 nullptr);
1334 Ty = VectorType::get(ScalarTy, NumVecElts);
1335 CondTy = VectorType::get(ScalarCondTy, NumVecElts);
1336 ++LongVectorCount;
1337 }
1338 // The minimal length of the vector is limited by the real length of vector
1339 // operations performed on the current platform. That's why several final
1340    // reduction operations are performed on the vectors with the same
1341 // architecture-dependent length.
1342 ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
1343 ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
1344 NumVecElts, Ty);
1345 MinMaxCost +=
1346 (NumReduxLevels - LongVectorCount) *
1347 (ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
1348 ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
1349 nullptr));
1350 // Need 3 extractelement instructions for scalarization + an additional
1351 // scalar select instruction.
1352 return ShuffleCost + MinMaxCost +
1353 3 * getScalarizationOverhead(Ty, /*Insert=*/false,
1354 /*Extract=*/true) +
1355 ConcreteTTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
1356 ScalarCondTy, nullptr);
1357 }
1358
1359 unsigned getVectorSplitCost() { return 1; }
1360
1361 /// @}
1362};
1363
1364/// \brief Concrete BasicTTIImpl that can be used if no further customization
1365/// is needed.
1366class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
1367 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
1368
1369 friend class BasicTTIImplBase<BasicTTIImpl>;
1370
1371 const TargetSubtargetInfo *ST;
1372 const TargetLoweringBase *TLI;
1373
1374 const TargetSubtargetInfo *getST() const { return ST; }
1375 const TargetLoweringBase *getTLI() const { return TLI; }
1376
1377public:
1378 explicit BasicTTIImpl(const TargetMachine *ST, const Function &F);
1379};
1380
1381} // end namespace llvm
1382
1383#endif // LLVM_CODEGEN_BASICTTIIMPL_H