//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//
TTI::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

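// Illustrative note (not part of the original file): given the logic above,
//   getPopcntSupport(32)  with a fast popcntd  -> TTI::PSK_FastHardware
//   getPopcntSupport(64)  with a slow popcntd  -> TTI::PSK_SlowHardware
//   getPopcntSupport(128) (wider than 64 bits) -> TTI::PSK_Software
// Passes such as LoopIdiomRecognize consult this hook to decide whether
// forming @llvm.ctpop is profitable.
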
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

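// Illustrative note (not part of the original file): the tiers above track
// how many instructions it takes to materialize the constant, e.g.
//   li  r3, 42               ; fits in signed 16 bits   -> TCC_Basic
//   lis r3, 0x1234           ; low halfword is zero     -> TCC_Basic
//   lis r3, 0x1234 + ori ... ; other 32-bit values      -> 2 * TCC_Basic
// and anything needing the full 64-bit sequence is charged 4 * TCC_Basic.
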
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

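// Illustrative note (not part of the original file): RunFree models the
// rotate-and-mask instructions (rlwinm and friends), which encode any
// contiguous run of ones (or its complement) directly. For example, in
//   %r = and i32 %x, 16776960   ; 0x00FFFF00 is a shifted mask -> free
// the constant need not be materialized, while a non-contiguous mask such
// as 0x00FF00FF falls through to the generic materialization cost above.
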
unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

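// Illustrative note (not part of the original file): for a value of type
// <8 x i32> on a 128-bit Altivec/VSX register, legalization splits the
// vector in two (LT.first == 2), so the base user cost is doubled to
// account for the instructions added by the split.
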
void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

const PPCTTIImpl::TTI::MemCmpExpansionOptions *
PPCTTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  static const auto Options = []() {
    TTI::MemCmpExpansionOptions Options;
    Options.LoadSizes.push_back(8);
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  return &Options;
}

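// Illustrative note (not part of the original file): with these LoadSizes,
// a small fixed-size comparison such as
//   memcmp(a, b, 16)
// can be expanded inline as two 8-byte load/compare pairs instead of a
// library call, with the 4/2/1-byte sizes picking up any remainder.
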
bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets on which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

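// Illustrative note (not part of the original file): on a subtarget where
// vectorsUseTwoUnits() is true, a v4i32 add that legalizes in one step to
// a native vector operation is charged Cost * 2; types that still need
// splitting (LT.first > 1) or operations that will be expanded anyway are
// left untouched, so the penalty is applied only once, at the final step.
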
int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, support cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

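// Illustrative note (not part of the original file): on an Altivec-only
// subtarget (no VSX/QPX), an insertelement is charged Cost + 2 + 7 and an
// extractelement Cost + 2, approximating the store/reload round trip of
// the load-hit-store stall described above.
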
int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case. (Note: the access size is in bits.)
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

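// Illustrative note (not part of the original file): for an access that
// reaches the decomposition path, e.g. a 16-byte vector with 4-byte
// alignment, SrcBytes / Alignment - 1 = 3 extra operations are charged per
// legalized register, and a vector store additionally pays one
// extractelement per lane via getVectorInstrCost.
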
int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, support cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

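// Illustrative note (not part of the original file): for a stride-2
// interleaved load of <8 x i32> (Factor == 2) that legalizes into two
// v4i32 registers (LT.first == 2), the shuffle term adds
// Factor * (LT.first - 1) == 2 on top of the memory-op cost above.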