//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> PPCEVL("ppc-evl",
                            cl::desc("Allow EVL type vp.load/vp.store"),
                            cl::init(false), cl::Hidden);

static cl::opt<bool> Pwr9EVL("ppc-pwr9-evl",
                             cl::desc("Allow vp.load and vp.store for pwr9"),
                             cl::init(false), cl::Hidden);

static cl::opt<bool> VecMaskCost("ppc-vec-mask-cost",
    cl::desc("add masking cost for i1 vectors"), cl::init(true), cl::Hidden);

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TTI::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

std::optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = II.getArgOperand(0);
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = II.getArgOperand(0);
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = II.getArgOperand(1);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Value *Ptr = II.getArgOperand(1);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vectorshuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned I = 0; I != 16; ++I) {
        Constant *Elt = Mask->getAggregateElement(I);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = PoisonValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned I = 0; I != 16; ++I) {
          if (isa<UndefValue>(Mask->getAggregateElement(I)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(I))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(I));
        }
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
      }
    }
    break;
  }
  return std::nullopt;
}
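
// Illustrative IR sketch (not taken from the test suite): when the pointer is
// known 16-byte aligned, the lvx case above rewrites
//   %v = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %p)
// into a plain
//   %v = load <4 x i32>, ptr %p, align 16
// and the vperm case becomes a shufflevector once its mask operand is constant.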

InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) const {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
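
// Worked example of the immediate costs above (a sketch of the intent, not an
// exhaustive table): 42 fits a signed 16-bit field, so it is a single `li`
// (TCC_Basic); 0x120000 has its low 16 bits clear, so one `lis` is also
// TCC_Basic; 0x123456 needs `lis` + `ori` (2 * TCC_Basic); anything that does
// not fit in 32 bits is charged 4 * TCC_Basic for the full materialization
// sequence.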

InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind) const {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) const {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    [[fallthrough]];
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    [[fallthrough]];
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    [[fallthrough]];
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

// Check if the current Type is an MMA vector type. Valid MMA types are
// v256i1 and v512i1 respectively.
static bool isMMAType(Type *Ty) {
  return Ty->isVectorTy() && (Ty->getScalarSizeInBits() == 1) &&
         (Ty->getPrimitiveSizeInBits() > 128);
}
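
// For example, <256 x i1> (__vector_pair) and <512 x i1> (__vector_quad)
// satisfy this predicate, while an ordinary <16 x i8> Altivec vector or a
// 128-bit <128 x i1> mask type does not.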

InstructionCost PPCTTIImpl::getInstructionCost(const User *U,
                                               ArrayRef<const Value *> Operands,
                                               TTI::TargetCostKind CostKind) const {
  // We already implement getCastInstrCost and getMemoryOpCost where we perform
  // the vector adjustment there.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getInstructionCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(U->getType());
    return LT.first * BaseT::getInstructionCost(U, Operands, CostKind);
  }

  return BaseT::getInstructionCost(U, Operands, CostKind);
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) const {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // FIXME: Sure there is no other way to get TTI? This should be cheap though.
  TargetTransformInfo TTI =
      TM.getTargetTransformInfo(*L->getHeader()->getParent());

  // Do not convert small short loops to CTR loop.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, TTI, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // Check that there are no hardware-loop-related intrinsics in the loop.
  for (auto *BB : L->getBlocks())
    for (auto &I : *BB)
      if (auto *Call = dyn_cast<IntrinsicInst>(&I))
        if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations ||
            Call->getIntrinsicID() == Intrinsic::loop_decrement)
          return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !extractBranchWeights(*BI, TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP,
                                         OptimizationRemarkEmitter *ORE) const {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP, ORE);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) const {
  BaseT::getPeelingPreferences(L, SE, PP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) const {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) const {
  // On the A2, always unroll aggressively.
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  if (getST()->hasAltivec())
    Options.LoadSizes = {16, 8, 4, 2, 1};
  else
    Options.LoadSizes = {8, 4, 2, 1};

  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}
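
// Rough illustration of the table above: with Altivec available, a 31-byte
// memcmp can be expanded as 16 + 8 + 4 + 2 + 1 byte loads of each operand
// (subject to the MaxNumLoads cap); without Altivec the largest block is 8
// bytes.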

bool PPCTTIImpl::enableInterleavedAccessVectorization() const { return true; }

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  if (Ty &&
      (Ty->getScalarType()->isFloatTy() || Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  if (Ty && (Ty->getScalarType()->isFP128Ty() ||
             Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {

  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC:  return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

TypeSize
PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasAltivec() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Returns a cost adjustment factor to adjust the cost of vector instructions
// on targets on which there is overlap between the vector and scalar units,
// thereby reducing the overall throughput of vector code wrt. scalar code.
// An invalid instruction cost is returned if the type is an MMA vector type.
InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode,
                                                       Type *Ty1,
                                                       Type *Ty2) const {
  // If the vector type is of an MMA type (v256i1, v512i1), an invalid
  // instruction cost is returned. This is to signify to other cost computing
  // functions to return the maximum instruction cost in order to prevent any
  // opportunities for the optimizer to produce MMA types within the IR.
  if (isMMAType(Ty1))
    return InstructionCost::getInvalid();

  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return InstructionCost(1);

  std::pair<InstructionCost, MVT> LT1 = getTypeLegalizationCost(Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return InstructionCost(1);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return InstructionCost(1);

  if (Ty2) {
    std::pair<InstructionCost, MVT> LT2 = getTypeLegalizationCost(Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return InstructionCost(1);
  }

  return InstructionCost(2);
}
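
// In effect, on subtargets that report vectorsUseTwoUnits(), a legal, unsplit,
// non-expanded vector operation is charged twice the base cost; for instance,
// getArithmeticInstrCost below multiplies its base cost by this factor, so a
// base cost of 1 becomes 2 on such cores.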

InstructionCost PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  // Fallback to the default implementation.
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info);
  return Cost * CostFactor;
}

InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *DstTy, VectorType *SrcTy,
                                           ArrayRef<int> Mask,
                                           TTI::TargetCostKind CostKind,
                                           int Index, VectorType *SubTp,
                                           ArrayRef<const Value *> Args,
                                           const Instruction *CxtI) const {

  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Instruction::ShuffleVector, SrcTy, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcTy);

  // PPC, for both Altivec/VSX, support cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return LT.first * CostFactor;
}

InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) const {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) const {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Dst, Src);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  InstructionCost Cost =
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  Cost *= CostFactor;
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

InstructionCost PPCTTIImpl::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, ValTy, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  InstructionCost Cost = BaseT::getCmpSelInstrCost(
      Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return Cost * CostFactor;
}

InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               TTI::TargetCostKind CostKind,
                                               unsigned Index, const Value *Op0,
                                               const Value *Op1) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Val, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  InstructionCost Cost =
      BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  Cost *= CostFactor;

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;
  }
  if (Val->getScalarType()->isIntegerTy()) {
    unsigned EltSize = Val->getScalarSizeInBits();
    // Computing on 1 bit values requires extra mask or compare operations.
    unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0;
    // Computing on non const index requires extra mask or compare operations.
    unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1;
    if (ST->hasP9Altivec()) {
      // P10 has vxform insert which can handle non const index. The
      // MaskCostForIdx is for masking the index.
      // P9 has insert for const index. A move-to VSR and a permute/insert.
      // Assume vector operation cost for both (cost will be 2x on P9).
      if (ISD == ISD::INSERT_VECTOR_ELT) {
        if (ST->hasP10Vector())
          return CostFactor + MaskCostForIdx;
        if (Index != -1U)
          return 2 * CostFactor;
      } else if (ISD == ISD::EXTRACT_VECTOR_ELT) {
        // It's an extract. Maybe we can do a cheap move-from VSR.
        unsigned EltSize = Val->getScalarSizeInBits();
        // P9 has both mfvsrd and mfvsrld for 64 bit integers.
        if (EltSize == 64 && Index != -1U)
          return 1;
        if (EltSize == 32) {
          unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
          if (Index == MfvsrwzIndex)
            return 1;

          // For other indices, including non-constant ones, P9 has a vxform
          // extract. The MaskCostForIdx is for masking the index.
          return CostFactor + MaskCostForIdx;
        }

        // We need a vector extract (or mfvsrld). Assume vector operation cost.
        // The cost of the load constant for a vector extract is disregarded
        // (invariant, easily schedulable).
        return CostFactor + MaskCostForOneBitSize + MaskCostForIdx;
      }
    } else if (ST->hasDirectMove() && Index != -1U) {
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      if (ISD == ISD::INSERT_VECTOR_ELT)
        return 3;
      return 3 + MaskCostForOneBitSize;
    }
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            Align Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            TTI::OperandValueInfo OpInfo,
                                            const Instruction *I) const {

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Src, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  InstructionCost Cost =
      BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost *= CostFactor;

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case. There are also corresponding store
  // instructions.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  unsigned SrcBytes = LT.second.getStoreSize();
  if (ST->hasVSX() && IsAltivecType) {
    if (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32))
      return 1;

    // Use lfiwax/xxspltw
    if (Opcode == Instruction::Load && MemBits == 32 && Alignment < SrcBytes)
      return 2;
  }

  // Aligned loads and stores are easy.
  if (!SrcBytes || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * ((SrcBytes / Alignment.value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int I = 0, E = cast<FixedVectorType>(Src)->getNumElements(); I < E;
         ++I)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, CostKind, I,
                                 nullptr, nullptr);

  return Cost;
}
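
// Worked example of the unaligned path above (illustrative numbers only):
// storing a <4 x i32> with 4-byte alignment on a pre-VSX Altivec core has
// SrcBytes == 16, so the code adds LT.first * (16 / 4 - 1) = 3 extra scalar
// stores plus one extract-element cost per lane of scalarization overhead.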

InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, VecTy, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);

  // Firstly, the cost of load/store operation.
  InstructionCost Cost =
      getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);

  // PPC, for both Altivec/VSX, support cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

InstructionCost
PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) const {

  if (ICA.getID() == Intrinsic::vp_load) {
    MemIntrinsicCostAttributes MICA(Intrinsic::masked_load, ICA.getReturnType(),
                                    Align(1), 0);
    return getMemIntrinsicInstrCost(MICA, CostKind);
  }

  if (ICA.getID() == Intrinsic::vp_store) {
    MemIntrinsicCostAttributes MICA(Intrinsic::masked_store,
                                    ICA.getArgTypes()[0], Align(1), 0);
    return getMemIntrinsicInstrCost(MICA, CostKind);
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Check that target features are exactly the same. We can revisit to see if
  // we can improve this.
  return CallerBits == CalleeBits;
}

bool PPCTTIImpl::areTypesABICompatible(const Function *Caller,
                                       const Function *Callee,
                                       ArrayRef<Type *> Types) const {

  // We need to ensure that argument promotion does not
  // attempt to promote pointers to MMA types (__vector_pair
  // and __vector_quad) since these types explicitly cannot be
  // passed as arguments. Both of these types are larger than
  // the 128-bit Altivec vectors and have a scalar size of 1 bit.
  if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
    return false;

  return llvm::none_of(Types, [](Type *Ty) {
    if (Ty->isSized())
      return Ty->isIntOrIntVectorTy(1) && Ty->getPrimitiveSizeInBits() > 128;
    return false;
  });
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC,
                            TargetLibraryInfo *LibInfo) const {
  // Process nested loops first.
  for (Loop *I : *L)
    if (canSaveCmp(I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                               const TargetTransformInfo::LSRCost &C2) const {
  // PowerPC default behaviour here is "instruction number 1st priority".
  // If LsrNoInsnsCost is set, call the default implementation.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}

bool PPCTTIImpl::isNumRegsMajorCostOfLSR() const { return false; }

bool PPCTTIImpl::shouldBuildRelLookupTables() const {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  // XCOFF hasn't implemented lowerRelativeReference, disable non-ELF for now.
  if (!TM.isELFv2ABI())
    return false;
  return BaseT::shouldBuildRelLookupTables();
}

bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  case Intrinsic::ppc_stbcx:
  case Intrinsic::ppc_sthcx:
  case Intrinsic::ppc_stdcx:
  case Intrinsic::ppc_stwcx: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

bool PPCTTIImpl::supportsTailCallFor(const CallBase *CB) const {
  return TLI->supportsTailCallFor(CB);
}

// Target hook used by CodeGen to decide whether to expand vector predication
// intrinsics into scalar operations or to use special ISD nodes to represent
// them. The Target will not see the intrinsics.
TargetTransformInfo::VPLegalization
PPCTTIImpl::getVPLegalizationStrategy(const VPIntrinsic &PI) const {
  unsigned Directive = ST->getCPUDirective();
  VPLegalization DefaultLegalization = BaseT::getVPLegalizationStrategy(PI);
  if (Directive != PPC::DIR_PWR10 && Directive != PPC::DIR_PWR_FUTURE)
    return DefaultLegalization;
1074
1075 if (!ST->isPPC64())
1076 return DefaultLegalization;
1077
1078 unsigned IID = PI.getIntrinsicID();
1079 if (IID != Intrinsic::vp_load && IID != Intrinsic::vp_store)
1080 return DefaultLegalization;
1081
1082 bool IsLoad = IID == Intrinsic::vp_load;
1083 Type *VecTy = IsLoad ? PI.getType() : PI.getOperand(0)->getType();
1084 EVT VT = TLI->getValueType(DL, VecTy, true);
1085 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
1086 VT != MVT::v16i8)
1087 return DefaultLegalization;
1088
1089 auto IsAllTrueMask = [](Value *MaskVal) {
1090 if (Value *SplattedVal = getSplatValue(MaskVal))
1091 if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
1092 return ConstValue->isAllOnesValue();
1093 return false;
1094 };
1095 unsigned MaskIx = IsLoad ? 1 : 2;
1096 if (!IsAllTrueMask(PI.getOperand(MaskIx)))
1097 return DefaultLegalization;
1098
1100}

bool PPCTTIImpl::hasActiveVectorLength() const {
  if (!PPCEVL || !ST->isPPC64())
    return false;
  unsigned CPU = ST->getCPUDirective();
  return CPU == PPC::DIR_PWR10 || CPU == PPC::DIR_PWR_FUTURE ||
         (Pwr9EVL && CPU == PPC::DIR_PWR9);
}

bool PPCTTIImpl::isLegalMaskedLoad(Type *DataType, Align Alignment,
                                   unsigned AddressSpace,
                                   TTI::MaskKind MaskKind) const {
  if (!hasActiveVectorLength())
    return false;

  auto IsLegalLoadWithLengthType = [](EVT VT) {
    if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8)
      return false;
    return true;
  };

  return IsLegalLoadWithLengthType(TLI->getValueType(DL, DataType, true));
}

bool PPCTTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::MaskKind MaskKind) const {
  return isLegalMaskedLoad(DataType, Alignment, AddressSpace);
}

InstructionCost
PPCTTIImpl::getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
                                     TTI::TargetCostKind CostKind) const {

  InstructionCost BaseCost = BaseT::getMemIntrinsicInstrCost(MICA, CostKind);

  unsigned Opcode;
  switch (MICA.getID()) {
  case Intrinsic::masked_load:
    Opcode = Instruction::Load;
    break;
  case Intrinsic::masked_store:
    Opcode = Instruction::Store;
    break;
  default:
    return BaseCost;
  }

  Type *DataTy = MICA.getDataType();
  Align Alignment = MICA.getAlignment();
  unsigned AddressSpace = MICA.getAddressSpace();

  auto VecTy = dyn_cast<FixedVectorType>(DataTy);
  if (!VecTy)
    return BaseCost;
  if (Opcode == Instruction::Load) {
    if (!isLegalMaskedLoad(VecTy->getScalarType(), Alignment, AddressSpace))
      return BaseCost;
  } else {
    if (!isLegalMaskedStore(VecTy->getScalarType(), Alignment, AddressSpace))
      return BaseCost;
  }
  if (VecTy->getPrimitiveSizeInBits() > 128)
    return BaseCost;

  // Cost is 1 (scalar compare) + 1 (scalar select) +
  // 1 * vectorCostAdjustmentFactor (vector load with length)
  // Maybe + 1 (scalar shift)
  InstructionCost Cost =
      1 + 1 + vectorCostAdjustmentFactor(Opcode, DataTy, nullptr);
  if (ST->getCPUDirective() != PPC::DIR_PWR_FUTURE ||
      VecTy->getScalarSizeInBits() != 8)
    Cost += 1; // need shift for length
  return Cost;
}
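
// Sketch of the resulting cost (assuming the masked operation is legal here):
// a <4 x i32> masked load is 1 (scalar compare) + 1 (scalar select) + the
// vector cost adjustment factor, plus one more unit for the length shift
// unless the CPU directive is PWR_FUTURE and the element type is i8.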