//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }
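
  // Illustrative only, not part of the interface: assuming I32Ty is a legal
  // integer type for DL and Impl is an instance of a hypothetical subclass,
  // the model above yields:
  //
  //   Impl.getOperationCost(Instruction::BitCast, I32Ty, I32Ty); // TCC_Free
  //   Impl.getOperationCost(Instruction::SDiv, I32Ty, nullptr);  // TCC_Expensive
  //   Impl.getOperationCost(Instruction::Add, I32Ty, nullptr);   // TCC_Basic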

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }
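
  // A minimal sketch of the model above (hypothetical operands): a GEP whose
  // indices are all ConstantInt values costs TTI::TCC_Free because it is
  // assumed to fold into its users' addressing modes, while a single
  // non-constant index makes it cost TTI::TCC_Basic.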

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) {
    JTSize = 0;
    return SI.getNumCases();
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    return TTI::TCC_Basic;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }
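
  // For example, for a hypothetical FunctionType FTy taking three parameters:
  //
  //   getCallCost(FTy, /*NumArgs=*/3); // TTI::TCC_Basic * 4: one unit per
  //                                    // argument plus one for the call itself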

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    return -1;
  }

  bool isLoweredToCall(const Function *F) {
    assert(F && "A concrete function must be provided to this routine.");

    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }
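
  // Example of the heuristic above, assuming SqrtF and Qsort are hypothetical
  // Function pointers named "sqrtf" and "qsort" respectively:
  //
  //   isLoweredToCall(SqrtF); // false: expected to become a single DAG node
  //   isLoweredToCall(Qsort); // true: not on either list above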

  void getUnrollingPreferences(Loop *, ScalarEvolution &,
                               TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }
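
  // Under this conservative default only reg and reg+reg forms are accepted.
  // For example (hypothetical arguments, address space 0):
  //
  //   isLegalAddressingMode(Ty, nullptr, 0, true, 1, 0);  // reg+reg -> true
  //   isLegalAddressingMode(Ty, nullptr, 16, true, 0, 0); // reg+imm -> false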

  bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }

  bool canMacroFuseCmp() { return false; }

  bool shouldFavorPostInc() const { return false; }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }

  bool prefersVectorizedAddressing() { return true; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool LSRWithInstrQueries() { return false; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool useAA() { return false; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  bool useColdCCForColdCall(Function &F) { return false; }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    return 0;
  }

  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) { return 0; }

  bool supportsEfficientVectorElementLoadStore() { return false; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
      bool IsZeroCmp) const {
    return nullptr;
  }

  bool enableInterleavedAccessVectorization() { return false; }

  bool enableMaskedInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  unsigned getMinVectorRegisterBitWidth() { return 128; }

  bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }

  unsigned getMinimumVF(unsigned ElemWidth) const { return 0; }

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader) {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }

  unsigned getCacheLineSize() { return 0; }

  llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace, const Instruction *I) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment, unsigned AddressSpace,
                                      bool IsMasked = false) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed) {
    return 1;
  }
  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  unsigned getAtomicMemIntrinsicMaxElementSize() const {
    // Note for overrides: You must ensure for all element unordered-atomic
    // memory intrinsics that all power-of-2 element sizes up to, and
    // including, the return value of this method have a corresponding
    // runtime lib call. These runtime lib call definitions can be found
    // in RuntimeLibcalls.h
    return 0;
  }
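
  // A hedged override sketch for a target whose runtime is assumed to provide
  // the element-unordered-atomic lib calls for 1-, 2- and 4-byte elements
  // (see RuntimeLibcalls.h):
  //
  //   unsigned getAtomicMemIntrinsicMaxElementSize() const { return 4; }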

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const {
    return Type::getInt8Ty(Context);
  }

  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const {
    for (unsigned i = 0; i != RemainingBytes; ++i)
      OpsOut.push_back(Type::getInt8Ty(Context));
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
                          const DataLayout &DL) const {
    return false;
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
                           const DataLayout &DL) const {
    return false;
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return false;
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return true;
  }
protected:
  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector it returns the min required size for one element.
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector we need to pick the max of the min required
      // sizes of the elements.
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements.
      isSigned = false;

      // The max required size is the total vector width divided by the
      // number of elements in the vector.
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // If one element is signed then the whole vector is signed.
          isSigned |= signedElement;
          // Keep the max required bit size across all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an int constant element.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }
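
  // Worked example (hypothetical constant): for the vector
  // <4 x i32> <i32 3, i32 -2, i32 7, i32 1> the per-element minimum sizes are
  // 2, 1, 3 and 1 bits, so this returns 3 and sets isSigned because of the
  // -2 element.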

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }
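
  // For a pointer whose SCEV is the add recurrence {%base,+,8}<%loop>
  // (hypothetical values), isStridedAccess returns true and the helper above
  // returns the SCEVConstant step 8; for a loop-invariant pointer they return
  // false and nullptr respectively.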

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take absolute value for negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
};

/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    const GlobalValue *BaseGV = nullptr;
    if (Ptr != nullptr) {
      // TODO: will remove this when pointers have an opaque type.
      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
                 PointeeType &&
             "explicit pointee type doesn't match operand's pointee type");
      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    }
    bool HasBaseReg = (BaseGV == nullptr);

    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;

    // Handle the case where the GEP instruction has a single operand,
    // the base pointer, and therefore TargetType is a nullptr.
    if (Operands.empty())
      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;

    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of a scalar GEP with constant index and the
      // cost of a vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always a splat or scalar constant.
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx) {
          BaseOffset +=
              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
        } else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    // Assumes the address space is 0 when Ptr is nullptr.
    unsigned AS =
        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());

    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV),
            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale, AS))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }
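
  // Illustrative reading of getGEPCost above, no additional API implied: the
  // loop folds constant indices into BaseOffset and records at most one
  // variable index in Scale; a second variable index bails out with
  // TTI::TCC_Basic because no addressing mode offers two scale registers.
  // Whether the folded form is free then depends on isLegalAddressingMode.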

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    // Static alloca doesn't generate target instructions.
    if (auto *A = dyn_cast<AllocaInst>(U))
      if (A->isStaticAlloca())
        return TTI::TCC_Free;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                                GEP->getPointerOperand(),
                                                Operands.drop_front());
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // The result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // nops on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
      if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
        return static_cast<T *>(this)->getExtCost(CI, Operands.back());
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }

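  // Dispatch summary for getUserCost above (illustrative, with each name
  // standing for a hypothetical User of that kind):
  //
  //   getUserCost(PHI, Ops)          // TTI::TCC_Free
  //   getUserCost(StaticAlloca, Ops) // TTI::TCC_Free
  //   getUserCost(GEP, Ops)          // forwarded to getGEPCost
  //   getUserCost(Call, Ops)         // forwarded to getCallCost
  //   getUserCost(SExtOfCmp, Ops)    // TTI::TCC_Free (extension of a compare)
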
  int getInstructionLatency(const Instruction *I) {
    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
                                           I->value_op_end());
    if (getUserCost(I, Operands) == TTI::TCC_Free)
      return 0;

    if (isa<LoadInst>(I))
      return 4;

    Type *DstTy = I->getType();

    // Usually an intrinsic is a simple instruction.
    // A real function call is much slower.
    if (auto *CI = dyn_cast<CallInst>(I)) {
      const Function *F = CI->getCalledFunction();
      if (!F || static_cast<T *>(this)->isLoweredToCall(F))
        return 40;
      // Some intrinsics return a value and a flag; we use the value type
      // to decide its latency.
      if (StructType *StructTy = dyn_cast<StructType>(DstTy))
        DstTy = StructTy->getElementType(0);
      // Fall through to simple instructions.
    }

    if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
      DstTy = VectorTy->getElementType();
    if (DstTy->isFloatingPointTy())
      return 3;

    return 1;
  }
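
  // The constants above are deliberately coarse defaults: 0 for users that
  // getUserCost reports as free, 4 for loads, 40 for real calls, 3 for
  // floating-point results and 1 otherwise; targets with a real scheduling
  // model are expected to override this hook.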
};
}

#endif