//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// \brief Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }
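
  // Illustrative reading of the defaults above: identity and
  // pointer-to-pointer bitcasts, no-op ptrtoint/inttoptr, and truncs to a
  // legal integer type come back as TTI::TCC_Free; division and remainder as
  // TTI::TCC_Expensive; everything else as TTI::TCC_Basic. For a hypothetical
  // derived implementation `Impl` (the names below are not part of this file):
  //
  //   Impl.getOperationCost(Instruction::SDiv, Int32Ty, nullptr);  // TCC_Expensive
  //   Impl.getOperationCost(Instruction::BitCast, PtrTy, PtrTy);   // TCC_Free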

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }
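
  // For example, under this default a GEP whose indices are all ConstantInts
  // is assumed to fold into its users' addressing modes and is reported as
  // TTI::TCC_Free; a single non-constant index is enough to fall back to
  // TTI::TCC_Basic.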

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) {
    JTSize = 0;
    return SI.getNumCases();
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    return TTI::TCC_Basic;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    return -1;
  }

  bool isLoweredToCall(const Function *F) {
    assert(F && "A concrete function must be provided to this routine.");

    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }

  void getUnrollingPreferences(Loop *, ScalarEvolution &,
                               TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }
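
  // A concrete reading of this default (illustrative): [reg] and [reg + reg]
  // are accepted, while any global base, non-zero constant offset, or scale
  // other than 0 or 1 is rejected. The CRTP getGEPCost below consults this
  // hook to decide whether a GEP folds into its users for free.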

  bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }

  bool prefersVectorizedAddressing() { return true; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool LSRWithInstrQueries() { return false; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    return 0;
  }

  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) { return 0; }

  bool supportsEfficientVectorElementLoadStore() { return false; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
      bool IsZeroCmp) const {
    return nullptr;
  }

  bool enableInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  unsigned getMinVectorRegisterBitWidth() { return 128; }

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader) {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }

  unsigned getCacheLineSize() { return 0; }

  llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace, const Instruction *I) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed) {
    return 1;
  }
  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  unsigned getAtomicMemIntrinsicMaxElementSize() const {
    // Note for overrides: You must ensure for all element unordered-atomic
    // memory intrinsics that all power-of-2 element sizes up to, and
    // including, the return value of this method have a corresponding
    // runtime lib call. These runtime lib call definitions can be found
    // in RuntimeLibcalls.h
    return 0;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const {
    return Type::getInt8Ty(Context);
  }

  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const {
    for (unsigned i = 0; i != RemainingBytes; ++i)
      OpsOut.push_back(Type::getInt8Ty(Context));
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return false;
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return true;
  }

protected:
  // Obtain the minimum required size to hold the value (without the sign)
  // In case of a vector it returns the min required size for one element.
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector need to pick the max between the min
      // required size for each element
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements
      isSigned = false;

      // The max required size is the total vector width divided by num
      // of elements in the vector
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // In case one element is signed then all the vector is signed.
          isSigned |= signedElement;
          // Save the max required bit size between all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an int constant element.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }
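
  // Worked example (illustrative): for the constant vector
  // <2 x i32> <i32 3, i32 -4>, each element needs getMinSignedBits() - 1 == 2
  // bits excluding the sign, the negative element sets isSigned, and the
  // function returns 2. For a ZExtInst whose source type is i16 it returns 16
  // with isSigned set to false.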

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take absolute value for negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
};

/// \brief CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    const GlobalValue *BaseGV = nullptr;
    if (Ptr != nullptr) {
      // TODO: will remove this when pointers have an opaque type.
      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
                 PointeeType &&
             "explicit pointee type doesn't match operand's pointee type");
      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    }
    bool HasBaseReg = (BaseGV == nullptr);

    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;

    // Handle the case where the GEP instruction has a single operand,
    // the basis, therefore TargetType is a nullptr.
    if (Operands.empty())
      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;

    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always splat or scalar constant
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx) {
          BaseOffset +=
              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
        } else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    // Assumes the address space is 0 when Ptr is nullptr.
    unsigned AS =
        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());

    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV),
            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale, AS))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }
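
  // Sketch of the net effect with the default isLegalAddressingMode above:
  // such a GEP is only free when it needs no global base, accumulates a zero
  // constant offset, and uses at most a single scale of 1 (e.g. a variable
  // index into i8 elements); targets that override isLegalAddressingMode get
  // a correspondingly more permissive answer here.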

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    // Static alloca doesn't generate target instructions.
    if (auto *A = dyn_cast<AllocaInst>(U))
      if (A->isStaticAlloca())
        return TTI::TCC_Free;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                                GEP->getPointerOperand(),
                                                Operands.drop_front());
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // Result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // nop on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
      if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
        return static_cast<T *>(this)->getExtCost(CI, Operands.back());
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
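
  // Dispatch summary (illustrative): PHIs and static allocas are free, GEPs
  // are routed to getGEPCost with the pointer operand split off, calls to
  // getCallCost, casts of compare results are free, other sign/zero/FP
  // extensions go through getExtCost, and anything else falls back to
  // getOperationCost on its opcode (and single operand type when there is
  // one).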

  int getInstructionLatency(const Instruction *I) {
    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
                                           I->value_op_end());
    if (getUserCost(I, Operands) == TTI::TCC_Free)
      return 0;

    if (isa<LoadInst>(I))
      return 4;

    Type *DstTy = I->getType();

    // Usually an intrinsic is a simple instruction.
    // A real function call is much slower.
    if (auto *CI = dyn_cast<CallInst>(I)) {
      const Function *F = CI->getCalledFunction();
      if (!F || static_cast<T *>(this)->isLoweredToCall(F))
        return 40;
      // Some intrinsics return a value and a flag, we use the value type
      // to decide its latency.
      if (StructType *StructTy = dyn_cast<StructType>(DstTy))
        DstTy = StructTy->getElementType(0);
      // Fall through to simple instructions.
    }

    if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
      DstTy = VectorTy->getElementType();
    if (DstTy->isFloatingPointTy())
      return 3;

    return 1;
  }
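
  // Rough latency model implied above (a sketch, not a per-target contract):
  // anything getUserCost deems free costs 0, a load 4, a call with no known
  // callee or one that is lowered to a real call 40, an instruction producing
  // a floating-point (or FP vector) result 3, and everything else 1.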
};
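
// Illustrative sketch of how these helpers are meant to be used. The class
// name and override below are hypothetical; in-tree targets normally derive
// from BasicTTIImplBase, which itself builds on TargetTransformInfoImplCRTPBase:
//
// \code
//   class MyTargetTTIImpl : public TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> {
//     typedef TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> BaseT;
//
//   public:
//     explicit MyTargetTTIImpl(const DataLayout &DL) : BaseT(DL) {}
//
//     // Override only the hooks the target has a better answer for; the
//     // conservative defaults above fill in the rest.
//     unsigned getNumberOfRegisters(bool Vector) { return Vector ? 32 : 16; }
//   };
// \endcode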
} // end namespace llvm

#endif // LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H