//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TTI::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}
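// Editor's note (illustrative, not from the original source): on a subtarget
// whose POPCNTD support is reported as fast, a 32/64-bit llvm.ctpop lowers to
// a single popcntw/popcntd instruction and the query above returns
// PSK_FastHardware; a core without POPCNTD gets PSK_Software, i.e. the
// generic shift-and-mask expansion.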

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
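// Editor's note (worked example, assuming TCC_Basic == 1):
//   42         -> 1 (fits a signed 16-bit immediate: li)
//   0x12340000 -> 1 (low halfword zero, materialized with lis)
//   0x12345678 -> 2 (lis + ori pair)
//   64-bit constants needing a longer materialization sequence -> 4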

int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
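// Editor's note (illustrative): the leading operands of
// llvm.experimental.stackmap (ID, shadow bytes) and
// llvm.experimental.patchpoint (ID, num bytes, target, num call args) only
// parameterize the lowering, and constant live values are recorded in the
// stackmap section rather than materialized into registers, which is why they
// are treated as TCC_Free above.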

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
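// Editor's note (worked examples, illustrative only):
//   add  %x, 100         -> free (signed 16-bit immediate: addi)
//   and  %x, 0x00FFFF00  -> free (contiguous run of ones: rlwinm-style mask)
//   icmp ult %x, 40000   -> free (unsigned 16-bit immediate: cmplwi)
//   or   %x, 0x10000     -> free (low halfword zero: oris)
//   add  %x, 0x12345     -> not free; costed via getIntImmCost(Imm, Ty)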

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}
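// Editor's note (illustrative): a <8 x i32> value on a 128-bit Altivec/VSX
// register file legalizes to two v4i32 registers, so LT.first == 2 and the
// base user cost is doubled to account for the split.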

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

const PPCTTIImpl::TTI::MemCmpExpansionOptions *
PPCTTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  static const auto Options = []() {
    TTI::MemCmpExpansionOptions Options;
    Options.LoadSizes.push_back(8);
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  return &Options;
}
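// Editor's note (illustrative): with LoadSizes = {8, 4, 2, 1}, a call such as
// memcmp(a, b, 15) can be expanded inline into one 8-byte, one 4-byte, one
// 2-byte and one 1-byte load/compare pair instead of a library call.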

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled, by
  // default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return LT.first;
}
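// Editor's note (illustrative): a single-register shuffle such as reversing a
// <4 x i32> lowers to one vperm on Altivec, so the cost is LT.first == 1; a
// <8 x i32> shuffle legalizes to two registers and therefore costs 2.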

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element inserts/extracts with Altivec are very expensive,
  // because they require a store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
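// Editor's note (illustrative): extracting an element of a <4 x i32> on a
// pre-VSX Altivec subtarget goes through a store and reload of the vector, so
// the cost above is the base extract cost plus the load-hit-store penalty of
// 2; an insert pays the larger penalty of 9 (2 + 7).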

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}
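// Editor's note (worked example, illustrative): a <4 x i32> store with 4-byte
// alignment on a pre-VSX Altivec subtarget reaches the final path above:
// SrcBytes == 16, so Cost += LT.first * (16/4 - 1) = 3 extra scalar accesses,
// plus one extract-element cost per element for the scalarization.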

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}
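// Editor's note (illustrative): de-interleaving two v4i32 streams
// (Factor == 2) out of a <8 x i32> block that legalizes to LT.first == 2
// registers adds Factor * (LT.first - 1) == 2 shuffles on top of the cost of
// the wide memory operation.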