LLVM 9.0.0svn
TargetTransformInfoImpl.h
//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//
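//
// For illustration only: a target's TTI implementation typically inherits
// (directly or via BasicTTIImplBase) from the CRTP mix-in defined below and
// overrides just the hooks it cares about. "MyTargetTTIImpl" is a
// hypothetical name, not part of LLVM:
//
//   class MyTargetTTIImpl
//       : public TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> {
//     typedef TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> BaseT;
//
//   public:
//     explicit MyTargetTTIImpl(const DataLayout &DL) : BaseT(DL) {}
//     // Override a single hook; everything else keeps the conservative
//     // defaults from the base class.
//     bool isLegalAddImmediate(int64_t Imm) { return isInt<12>(Imm); }
//   };
//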

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
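    // For example, with a 64-bit DataLayout where i32 and i64 are legal:
    //   inttoptr i64 %x to i8*   --> TCC_Free  (i64 covers the pointer)
    //   ptrtoint i8* %p to i32   --> TCC_Basic (i32 cannot hold the pointer)
    // These are illustrative costs under this default model only; targets
    // may override getOperationCost entirely.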
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }
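  // For illustration, under this basic model:
  //   getelementptr i32, i32* %p, i64 4    ; all-constant indices -> TCC_Free
  //   getelementptr i32, i32* %p, i64 %i   ; variable index       -> TCC_Basic
  // The assumption is that a constant offset folds into the memory operand of
  // a load/store addressing mode.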

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) {
    JTSize = 0;
    return SI.getNumCases();
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    return TTI::TCC_Basic;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs, const User *U) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }
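  // Worked example of the approximation above (not a target-specific cost):
  // a call such as "call i32 @f(i32 %a, i32 %b, i32 %c)" has NumArgs == 3 and
  // is charged TCC_Basic * (3 + 1), i.e. one unit per argument setup plus one
  // for the call itself.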

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getMemcpyCost(const Instruction *I) {
    return TTI::TCC_Expensive;
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    return -1;
  }

  bool isLoweredToCall(const Function *F) {
    assert(F && "A concrete function must be provided to this routine.");

    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }
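  // For example, under the name-based heuristic above, a call to "sqrtf" is
  // assumed to become a single selection DAG node (not a real call), while a
  // call to an arbitrary external function such as "tanf" conservatively
  // reports true.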

  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                TTI::HardwareLoopInfo &HWLoopInfo) {
    return false;
  }

  void getUnrollingPreferences(Loop *, ScalarEvolution &,
                               TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }
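  // Under this default, [reg] and [reg + reg] are the only forms accepted:
  //   BaseGV = null, BaseOffset = 0, Scale = 0  ->  [reg]       : legal
  //   BaseGV = null, BaseOffset = 0, Scale = 1  ->  [reg + reg] : legal
  //   BaseGV = null, BaseOffset = 8, Scale = 0  ->  [reg + 8]   : rejected
  // Real targets override this to describe their actual addressing modes.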

  bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }

  bool canMacroFuseCmp() { return false; }

  bool shouldFavorPostInc() const { return false; }

  bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  bool isLegalMaskedCompressStore(Type *DataType) { return false; }

  bool isLegalMaskedExpandLoad(Type *DataType) { return false; }

  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }

  bool prefersVectorizedAddressing() { return true; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool LSRWithInstrQueries() { return false; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool useAA() { return false; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  bool useColdCCForColdCall(Function &F) { return false; }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    return 0;
  }

  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) { return 0; }

  bool supportsEfficientVectorElementLoadStore() { return false; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
      bool IsZeroCmp) const {
    return nullptr;
  }

  bool enableInterleavedAccessVectorization() { return false; }

  bool enableMaskedInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  unsigned getMinVectorRegisterBitWidth() { return 128; }

  bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }

  unsigned getMinimumVF(unsigned ElemWidth) const { return 0; }

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader) {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }

  unsigned getCacheLineSize() { return 0; }

  llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace, const Instruction *I) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment, unsigned AddressSpace,
                                      bool UseMaskForCond = false,
                                      bool UseMaskForGaps = false) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed) {
    return 1;
  }
  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  unsigned getAtomicMemIntrinsicMaxElementSize() const {
    // Note for overrides: You must ensure for all element unordered-atomic
    // memory intrinsics that all power-of-2 element sizes up to, and
    // including, the return value of this method have a corresponding
    // runtime lib call. These runtime lib call definitions can be found
    // in RuntimeLibcalls.h
    return 0;
  }
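  // Sketch of the override contract described above, for a hypothetical
  // target with runtime lib calls for 1-, 2-, 4- and 8-byte unordered-atomic
  // element sizes (e.g. __llvm_memcpy_element_unordered_atomic_1 .. _8):
  //
  //   unsigned getAtomicMemIntrinsicMaxElementSize() const { return 8; }
  //
  // Returning 8 promises that every power-of-2 element size up to 8 has a
  // corresponding runtime lib call definition in RuntimeLibcalls.h.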

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const {
    return Type::getInt8Ty(Context);
  }

  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const {
    for (unsigned i = 0; i != RemainingBytes; ++i)
      OpsOut.push_back(Type::getInt8Ty(Context));
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }
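  // For example, a caller with "target-features"="+avx2" and a callee with
  // "+avx512f" compare unequal here, so inlining is conservatively rejected;
  // targets typically override this with a feature-subset check instead of
  // strict attribute equality.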

  bool areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
                          const DataLayout &DL) const {
    return false;
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
                           const DataLayout &DL) const {
    return false;
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return false;
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return true;
  }

protected:
  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector it returns the min required size for one element.
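  // Worked example (illustrative): for the constant vector
  //   <4 x i32> <i32 1, i32 2, i32 -3, i32 4>
  // the element min-required sizes (getMinSignedBits() - 1) are 1, 2, 2 and
  // 3 bits; one element is negative, so the result is 3 with isSigned == true.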
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector we need to pick the max between the min
      // required size for each element.
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements.
      isSigned = false;

      // The max required size is the total vector width divided by the
      // number of elements in the vector.
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // In case one element is signed then all the vector is signed.
          isSigned |= signedElement;
          // Save the max required bit size between all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an int constant element.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take absolute value for negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
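  // For illustration: a pointer that SCEV models as {%base,+,4}<%loop>
  // (e.g. &A[i] for an i32 array A) is a strided access with a constant step
  // of 4 bytes, so isConstantStridedAccessLessThan(SE, Ptr, 16) returns true,
  // modulo the negative-stride caveat in the FIXME above.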
};

/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
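// The CRTP parameter lets these helpers dispatch back into the most-derived
// class without virtual calls; e.g. getUserCost below invokes
// static_cast<T *>(this)->getCallCost(...), so a hypothetical
// "class MyTargetTTIImpl
//      : public TargetTransformInfoImplCRTPBase<MyTargetTTIImpl>"
// that overrides getCallCost has its override honored by the shared logic.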
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs, const User *U) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys, U);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs,
                                               U);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments,
                       const User *U) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size(), U);
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
    // TODO: will remove this when pointers have an opaque type.
    assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
               PointeeType &&
           "explicit pointee type doesn't match operand's pointee type");
    auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    bool HasBaseReg = (BaseGV == nullptr);

    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;

    // Handle the case where the GEP instruction has a single operand,
    // the basis, therefore TargetType is a nullptr.
    if (Operands.empty())
      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;

    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always splat or scalar constant.
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx) {
          BaseOffset +=
              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
        } else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV),
            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale,
            Ptr->getType()->getPointerAddressSpace()))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys, const User *U) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      return TTI::TCC_Basic;

    // TODO: other libc intrinsics.
    case Intrinsic::memcpy:
      return static_cast<T *>(this)->getMemcpyCost(dyn_cast<Instruction>(U));

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::is_constant:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments, const User *U) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys, U);
  }

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    // Static alloca doesn't generate target instructions.
    if (auto *A = dyn_cast<AllocaInst>(U))
      if (A->isStaticAlloca())
        return TTI::TCC_Free;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                                GEP->getPointerOperand(),
                                                Operands.drop_front());
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size(), U);
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments, U);
    }

    if (isa<SExtInst>(U) || isa<ZExtInst>(U) || isa<FPExtInst>(U))
      // The old behaviour of generally treating extensions of icmp to be free
      // has been removed. A target that needs it should override getUserCost().
      return static_cast<T *>(this)->getExtCost(cast<Instruction>(U),
                                                Operands.back());

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
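  // A few illustrative outcomes of the dispatch above under the default cost
  // model (targets may override any of the callees):
  //   %phi = phi i32 [...]             -> TCC_Free
  //   %a   = alloca i32 (static)       -> TCC_Free
  //   %gep = getelementptr ..., i64 %i -> getGEPCost(...)    -> TCC_Basic
  //   %d   = sdiv i32 %x, %y           -> getOperationCost   -> TCC_Expensive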

  unsigned getInstructionLatency(const Instruction *I) {
    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
                                           I->value_op_end());
    if (getUserCost(I, Operands) == TTI::TCC_Free)
      return 0;

    if (isa<LoadInst>(I))
      return 4;

    Type *DstTy = I->getType();

    // Usually an intrinsic is a simple instruction.
    // A real function call is much slower.
    if (auto *CI = dyn_cast<CallInst>(I)) {
      const Function *F = CI->getCalledFunction();
      if (!F || static_cast<T *>(this)->isLoweredToCall(F))
        return 40;
      // Some intrinsics return a value and a flag, we use the value type
      // to decide its latency.
      if (StructType *StructTy = dyn_cast<StructType>(DstTy))
        DstTy = StructTy->getElementType(0);
      // Fall through to simple instructions.
    }

    if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
      DstTy = VectorTy->getElementType();
    if (DstTy->isFloatingPointTy())
      return 3;

    return 1;
  }
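  // Rough latencies produced by the heuristic above (in abstract units):
  //   load i32, i32* %p          -> 4
  //   call void @external_fn()   -> 40  (lowered to a real call)
  //   fadd double %a, %b         -> 3   (floating-point result)
  //   add i64 %a, %b             -> 1
  // A free user (e.g. a PHI) reports 0.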
};
} // namespace llvm

#endif