//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

18#include "AMDGPUTargetMachine.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/IRBuilder.h"
27#include "llvm/IR/IntrinsicsAMDGPU.h"
30#include <optional>
31
32using namespace llvm;
33
34#define DEBUG_TYPE "AMDGPUtti"
35
37 "amdgpu-unroll-threshold-private",
38 cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
39 cl::init(2700), cl::Hidden);
40
42 "amdgpu-unroll-threshold-local",
43 cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
44 cl::init(1000), cl::Hidden);
45
47 "amdgpu-unroll-threshold-if",
48 cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
49 cl::init(200), cl::Hidden);
50
52 "amdgpu-unroll-runtime-local",
53 cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
54 cl::init(true), cl::Hidden);
55
57 "amdgpu-unroll-max-block-to-analyze",
58 cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
59 cl::init(32), cl::Hidden);
60
61static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
62 cl::Hidden, cl::init(4000),
63 cl::desc("Cost of alloca argument"));
64
65// If the amount of scratch memory to eliminate exceeds our ability to allocate
66// it into registers we gain nothing by aggressively inlining functions for that
67// heuristic.
69 ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
70 cl::init(256),
71 cl::desc("Maximum alloca size to use for inline cost"));
72
73// Inliner constraint to achieve reasonable compilation time.
75 "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
76 cl::desc("Maximum number of BBs allowed in a function after inlining"
77 " (compile time constraint)"));
78
79// This default unroll factor is based on microbenchmarks on gfx1030.
81 "amdgpu-memcpy-loop-unroll",
82 cl::desc("Unroll factor (affecting 4x32-bit operations) to use for memory "
83 "operations when lowering memcpy as a loop"),
84 cl::init(16), cl::Hidden);
85
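// Returns true if the branch condition \p Cond transitively depends on a PHI
// node defined in loop \p L itself (and not merely in one of its subloops).
// The operand walk is capped at a recursion depth of 10.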
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop *SubLoop) {
             return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth + 1))
      return true;
  }
  return false;
}

AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getDataLayout()),
      TargetTriple(TM->getTargetTriple()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()) {}

void AMDGPUTTIImpl::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  const Function &F = *L->getHeader()->getParent();
  UP.Threshold =
      F.getFnAttributeAsParsedInteger("amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // Conditional branch in a loop back edge needs 3 additional exec
  // manipulations in average.
  UP.BEInsns += 3;

  // We want to run unroll even for the loops which have been vectorized.
  UP.UnrollVectorizedLoop = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;

  // If this loop has the amdgpu.loop.unroll.threshold metadata we will use the
  // provided threshold value as the default for Threshold.
  if (MDNode *LoopUnrollThreshold =
          findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
    if (LoopUnrollThreshold->getNumOperands() == 2) {
      ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
          LoopUnrollThreshold->getOperand(1));
      if (MetaThresholdValue) {
        // We will also use the supplied value for PartialThreshold for now.
        // We may introduce additional metadata if it becomes necessary in the
        // future.
        UP.Threshold = MetaThresholdValue->getSExtValue();
        UP.PartialThreshold = UP.Threshold;
        ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
        ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
      }
    }
  }

  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop *SubLoop) {
           return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate the
      // if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          BasicBlock *Succ0 = Br->getSuccessor(0);
          BasicBlock *Succ1 = Br->getSuccessor(1);
          if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
              (L->contains(Succ1) && L->isLoopExiting(Succ1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
                 AS == AMDGPUAS::REGION_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unroll for local memory if we have seen addressing not to
        // a variable, most likely we will be unable to combine it.
        // Do not unroll too deep inner loops for local memory to give a chance
        // to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2)
          continue;

        const Value *V = getUnderlyingObject(GEP->getPointerOperand());
        if (!isa<GlobalVariable>(V) && !isa<Argument>(V))
          continue;

        LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                          << *L << " due to LDS use.\n");
        UP.Runtime = UnrollRuntimeLocal;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop *SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }

    // If we got a GEP in a small BB from an inner loop then increase the max
    // trip count to analyze for a better cost estimation in unroll.
    if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
      UP.MaxIterationsCountToAnalyze = 32;
  }
}

void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::PeelingPreferences &PP) const {
  BaseT::getPeelingPreferences(L, SE, PP);
}

uint64_t AMDGPUTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
  return 1024;
}

const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
    AMDGPU::FeatureUnalignedAccessMode,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,

    // The default assumption needs to be ecc is enabled, but no directly
    // exposed operations depend on it, so it can be safely inlined.
    AMDGPU::FeatureSRAMECC,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};

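// Cache the denormal-fp-math modes for the function up front; several cost
// queries below (e.g. the fdiv and fused multiply-add handling) depend on
// whether FP32 and FP64/FP16 denormals are flushed.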
GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getDataLayout()),
      ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F),
      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
  SIModeRegisterDefaults Mode(F, *ST);
  HasFP32Denormals = Mode.FP32Denormals != DenormalMode::getPreserveSign();
  HasFP64FP16Denormals =
      Mode.FP64FP16Denormals != DenormalMode::getPreserveSign();
}

bool GCNTTIImpl::hasBranchDivergence(const Function *F) const {
  return !F || !ST->isSingleLaneExecution(*F);
}

unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
  // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
  // registers. See getRegisterClassForType for the implementation.
  // In this case vector registers are not vector in terms of
  // VGPRs, but those which can hold multiple values.

  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return 4;
}

TypeSize
GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

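// Vectorization of loads and stores is capped at 128 bits (4 dwords), while
// other operations are only considered for packing when the subtarget has
// 16-bit instructions or packed FP32 operations.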
unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
  if (Opcode == Instruction::Load || Opcode == Instruction::Store)
    return 32 * 4 / ElemWidth;
  // For a given width return the max number of elements that can be combined
  // into a wider bit value:
  return (ElemWidth == 8 && ST->has16BitInsts())       ? 4
         : (ElemWidth == 16 && ST->has16BitInsts())    ? 2
         : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
                                                       : 1;
}

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
      AddrSpace == AMDGPUAS::BUFFER_RESOURCE ||
      AddrSpace == AMDGPUAS::BUFFER_STRIDED_POINTER) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  // Common to flat, global, local and region. Assume for unknown addrspace.
  return 128;
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to
  // decompose them later if they may access private memory. We don't have
  // enough context here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccessEnabled()) &&
           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

uint64_t GCNTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
  return 1024;
}

Type *GCNTTIImpl::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {

  if (AtomicElementSize)
    return Type::getIntNTy(Context, *AtomicElementSize * 8);

  // 16-byte accesses achieve the highest copy throughput.
  // If the operation has a fixed known length that is large enough, it is
  // worthwhile to return an even wider type and let legalization lower it into
  // multiple accesses, effectively unrolling the memcpy loop.
  // We also rely on legalization to decompose into smaller accesses for
  // subtargets and address spaces where it is necessary.
  //
  // Don't unroll if Length is not a constant, since unrolling leads to worse
  // performance for length values that are smaller or slightly larger than the
  // total size of the type returned here. Mitigating that would require a more
  // complex lowering for variable-length memcpy and memmove.
  unsigned I32EltsInVector = 4;
  if (MemcpyLoopUnroll > 0 && isa_and_nonnull<ConstantInt>(Length))
    return FixedVectorType::get(Type::getInt32Ty(Context),
                                MemcpyLoopUnroll * I32EltsInVector);

  return FixedVectorType::get(Type::getInt32Ty(Context), I32EltsInVector);
}

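// The residual bytes of a lowered memcpy are covered greedily with the widest
// available pieces: 16-byte <4 x i32> chunks, then i64, i32, i16, and finally
// i8.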
void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {

  if (AtomicCpySize)
    return BaseT::getMemcpyLoopResidualLoweringType(
        OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
        DestAlign, AtomicCpySize);

  Type *I32x4Ty = FixedVectorType::get(Type::getInt32Ty(Context), 4);
  while (RemainingBytes >= 16) {
    OpsOut.push_back(I32x4Ty);
    RemainingBytes -= 16;
  }

  Type *I64Ty = Type::getInt64Ty(Context);
  while (RemainingBytes >= 8) {
    OpsOut.push_back(I64Ty);
    RemainingBytes -= 8;
  }

  Type *I32Ty = Type::getInt32Ty(Context);
  while (RemainingBytes >= 4) {
    OpsOut.push_back(I32Ty);
    RemainingBytes -= 4;
  }

  Type *I16Ty = Type::getInt16Ty(Context);
  while (RemainingBytes >= 2) {
    OpsOut.push_back(I16Ty);
    RemainingBytes -= 2;
  }

  Type *I8Ty = Type::getInt8Ty(Context);
  while (RemainingBytes) {
    OpsOut.push_back(I8Ty);
    --RemainingBytes;
  }
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF.isScalar())
    return 1;

  return 8;
}

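// Model the ds.ordered.add/swap intrinsics as target memory intrinsics so the
// rest of the optimizer sees their pointer operand, ordering and volatility.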
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal >
        static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isZero();
    return true;
  }
  default:
    return false;
  }
}

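// Arithmetic costs below are expressed in terms of the subtarget's issue
// rates: full-rate, half-rate and quarter-rate instructions, plus a separate
// rate for 64-bit operations (see get64BitInstrCost).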
InstructionCost GCNTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, but the legal types, we
  // need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost(CostKind) * LT.first * NElts;

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    if (ST->has16BitInsts() && SLT == MVT::i16)
      NElts = (NElts + 1) / 2;

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FMUL:
    // Check possible fuse {fadd|fsub}(a,fmul(b,c)) and return zero cost for
    // fmul(b,c) supposing the fadd|fsub will get estimated cost for the whole
    // fused operation.
    if (CxtI && CxtI->hasOneUse())
      if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
        const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
        if (OPC == ISD::FADD || OPC == ISD::FSUB) {
          if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
            return TargetTransformInfo::TCC_Free;
          if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
            return TargetTransformInfo::TCC_Free;

          // Estimate all types may be fused with contract/unsafe flags
          const TargetOptions &Options = TLI->getTargetMachine().Options;
          if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
              (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
            return TargetTransformInfo::TCC_Free;
        }
      }
    [[fallthrough]];
  case ISD::FADD:
  case ISD::FSUB:
    if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
      NElts = (NElts + 1) / 2;
    if (ST->hasBF16PackedInsts() && SLT == MVT::bf16)
      NElts = (NElts + 1) / 2;
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost(CostKind);

    if (ST->has16BitInsts() && SLT == MVT::f16)
      NElts = (NElts + 1) / 2;

    if (SLT == MVT::f32 || SLT == MVT::f16 || SLT == MVT::bf16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 7 * get64BitInstrCost(CostKind) +
                 getQuarterRateInstrCost(CostKind) +
                 3 * getHalfRateInstrCost(CostKind);
      // Add cost of workaround.
      if (!ST->hasUsableDivScaleConditionOutput())
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !HasFP32Denormals) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost =
          4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 && (CxtI && CxtI->hasApproxFunc())) {
      // Fast unsafe fdiv lowering:
      // f32 rcp
      // f32 fmul
      int Cost = getQuarterRateInstrCost(CostKind) + getFullRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      // 4 more v_cvt_* insts without f16 insts support
      int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
                 1 * getQuarterRateInstrCost(CostKind);

      if (!HasFP32Denormals) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  case ISD::FNEG:
    // Use the backend's estimation. If fneg is not free each element will cost
    // one additional instruction.
    return TLI->isFNegFree(SLT) ? 0 : NElts;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

// Return true if there's a potential benefit from using v2f16/v2i16
// instructions for an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::minimumnum:
  case Intrinsic::maximumnum:
  case Intrinsic::canonicalize:
  // There's a small benefit to using vector ops in the legalized code.
  case Intrinsic::round:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::abs:
    return true;
  default:
    return false;
  }
}

InstructionCost
GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) const {
  switch (ICA.getID()) {
  case Intrinsic::fabs:
    // Free source modifier in the common case.
    return 0;
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
    // TODO: If hasPackedTID, or if the calling context is not an entry point
    // there may be a bit instruction.
    return 0;
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::amdgcn_lds_kernel_id:
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_dispatch_id:
  case Intrinsic::amdgcn_implicitarg_ptr:
  case Intrinsic::amdgcn_queue_ptr:
    // Read from an argument register.
    return 0;
  default:
    break;
  }

  if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);

  Type *RetTy = ICA.getReturnType();

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);

  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  if ((ST->hasVOP3PInsts() &&
       (SLT == MVT::f16 || SLT == MVT::i16 ||
        (SLT == MVT::bf16 && ST->hasBF16PackedInsts()))) ||
      (ST->hasPackedFP32Ops() && SLT == MVT::f32))
    NElts = (NElts + 1) / 2;

  // TODO: Get more refined intrinsic costs?
  unsigned InstRate = getQuarterRateInstrCost(CostKind);

  switch (ICA.getID()) {
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
    if (SLT == MVT::f64) {
      InstRate = get64BitInstrCost(CostKind);
      break;
    }

    if ((SLT == MVT::f32 && ST->hasFastFMAF32()) || SLT == MVT::f16)
      InstRate = getFullRateInstrCost();
    else {
      InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
                                     : getQuarterRateInstrCost(CostKind);
    }
    break;
  case Intrinsic::copysign:
    return NElts * getFullRateInstrCost();
  case Intrinsic::minimumnum:
  case Intrinsic::maximumnum: {
    // Instruction + 2 canonicalizes. For cases that need type promotion, the
    // promotion takes the place of the canonicalize.
    unsigned NumOps = 3;
    if (const IntrinsicInst *II = ICA.getInst()) {
      // Directly legal with ieee=0
      // TODO: Not directly legal with strictfp
      if (fpenvIEEEMode(*II) == KnownIEEEMode::Off)
        NumOps = 1;
    }

    unsigned BaseRate =
        SLT == MVT::f64 ? get64BitInstrCost(CostKind) : getFullRateInstrCost();
    InstRate = BaseRate * NumOps;
    break;
  }
  case Intrinsic::canonicalize: {
    InstRate =
        SLT == MVT::f64 ? get64BitInstrCost(CostKind) : getFullRateInstrCost();
    break;
  }
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat: {
    if (SLT == MVT::i16 || SLT == MVT::i32)
      InstRate = getFullRateInstrCost();

    static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
    if (any_of(ValidSatTys, equal_to(LT.second)))
      NElts = 1;
    break;
  }
  case Intrinsic::abs:
    // Expansion takes 2 instructions for VALU
    if (SLT == MVT::i16 || SLT == MVT::i32)
      InstRate = 2 * getFullRateInstrCost();
    break;
  default:
    break;
  }

  return LT.first * NElts * InstRate;
}

InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  const bool SCost =
      (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency);
  const int CBrCost = SCost ? 5 : 7;
  switch (Opcode) {
  case Instruction::Br: {
    // Branch instruction takes about 4 slots on gfx900.
    const auto *BI = dyn_cast_or_null<BranchInst>(I);
    if (BI && BI->isUnconditional())
      return SCost ? 1 : 4;
    // Suppose a conditional branch takes 3 additional exec manipulation
    // instructions in average.
    return CBrCost;
  }
  case Instruction::Switch: {
    const auto *SI = dyn_cast_or_null<SwitchInst>(I);
    // Each case (including default) takes 1 cmp + 1 cbr instructions in
    // average.
    return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
  }
  case Instruction::Ret:
    return SCost ? 1 : 10;
  }
  return BaseT::getCFInstrCost(Opcode, CostKind, I);
}

InstructionCost
GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                       std::optional<FastMathFlags> FMF,
                                       TTI::TargetCostKind CostKind) const {
  if (TTI::requiresOrderedReduction(FMF))
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  return LT.first * getFullRateInstrCost();
}

InstructionCost
GCNTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                   FastMathFlags FMF,
                                   TTI::TargetCostKind CostKind) const {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  return LT.first * getHalfRateInstrCost(CostKind);
}

InstructionCost GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                               TTI::TargetCostKind CostKind,
                                               unsigned Index, const Value *Op0,
                                               const Value *Op1) const {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0,
                                       Op1);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
  }
}

/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
/// this is analyzing the collective result of all output registers. Otherwise,
/// this is only querying a specific result index if this returns multiple
/// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
    const CallInst *CI, ArrayRef<unsigned> Indices) const {
  // TODO: Handle complex extract indices
  if (Indices.size() > 1)
    return true;

  const DataLayout &DL = CI->getDataLayout();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);

  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];

  int OutputIdx = 0;
  for (auto &TC : TargetConstraints) {
    if (TC.Type != InlineAsm::isOutput)
      continue;

    // Skip outputs we don't care about.
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
      continue;

    TLI->ComputeConstraintToUse(TC, SDValue());

    const TargetRegisterClass *RC = TLI->getRegForInlineAsmConstraint(
        TRI, TC.ConstraintCode, TC.ConstraintVT).second;

    // For AGPR constraints null is returned on subtargets without AGPRs, so
    // assume divergent for null.
    if (!RC || !TRI->isSGPRClass(RC))
      return true;
  }

  return false;
}

bool GCNTTIImpl::isReadRegisterSourceOfDivergence(
    const IntrinsicInst *ReadReg) const {
  Metadata *MD =
      cast<MetadataAsValue>(ReadReg->getArgOperand(0))->getMetadata();
  StringRef RegName =
      cast<MDString>(cast<MDNode>(MD)->getOperand(0))->getString();

  // Special case registers that look like VCC.
  MVT VT = MVT::getVT(ReadReg->getType());
  if (VT == MVT::i1)
    return true;

  // Special case scalar registers that start with 'v'.
  if (RegName.starts_with("vcc") || RegName.empty())
    return false;

  // VGPR or AGPR is divergent. There aren't any specially named vector
  // registers.
  return RegName[0] == 'v' || RegName[0] == 'a';
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !AMDGPU::isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    Intrinsic::ID IID = Intrinsic->getIntrinsicID();
    switch (IID) {
    case Intrinsic::read_register:
      return isReadRegisterSourceOfDivergence(Intrinsic);
    case Intrinsic::amdgcn_addrspacecast_nonnull: {
      unsigned SrcAS =
          Intrinsic->getOperand(0)->getType()->getPointerAddressSpace();
      unsigned DstAS = Intrinsic->getType()->getPointerAddressSpace();
      return SrcAS == AMDGPUAS::PRIVATE_ADDRESS &&
             DstAS == AMDGPUAS::FLAT_ADDRESS &&
             ST->hasGloballyAddressableScratch();
    }
    case Intrinsic::amdgcn_workitem_id_y:
    case Intrinsic::amdgcn_workitem_id_z: {
      const Function *F = Intrinsic->getFunction();
      bool HasUniformYZ =
          ST->hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true);
      std::optional<unsigned> ThisDimSize = ST->getReqdWorkGroupSize(
          *F, IID == Intrinsic::amdgcn_workitem_id_y ? 1 : 2);
      return !HasUniformYZ && (!ThisDimSize || *ThisDimSize != 1);
    }
    default:
      return AMDGPU::isIntrinsicSourceOfDivergence(IID);
    }
  }

  // Assume all function calls are a source of divergence.
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return isInlineAsmSourceOfDivergence(CI);
    return true;
  }

  // Assume all function calls are a source of divergence.
  if (isa<InvokeInst>(V))
    return true;

  // If the target supports globally addressable scratch, the mapping from
  // scratch memory to the flat aperture changes, therefore an address space
  // cast is no longer uniform.
  if (auto *CastI = dyn_cast<AddrSpaceCastInst>(V)) {
    return CastI->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
           CastI->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS &&
           ST->hasGloballyAddressableScratch();
  }

  return false;
}

bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicAlwaysUniform(Intrinsic->getIntrinsicID());

  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
      return !isInlineAsmSourceOfDivergence(CI);
    return false;
  }

  // In most cases TID / wavefrontsize is uniform.
  //
  // However, if a kernel has uneven dimensions we can have a value of
  // workitem-id-x divided by the wavefrontsize non-uniform. For example
  // dimensions (65, 2) will have workitems with address (64, 0) and (0, 1)
  // packed into a same wave which gives 1 and 0 after the division by 64
  // respectively.
  //
  // The X dimension doesn't reset within a wave if either both the Y
  // and Z dimensions are of length 1, or if the X dimension's required
  // size is a power of 2. Note, however, if the X dimension's maximum
  // size is a power of 2 < the wavefront size, division by the wavefront
  // size is guaranteed to yield 0, so this is also a no-reset case.
  bool XDimDoesntResetWithinWaves = false;
  if (auto *I = dyn_cast<Instruction>(V)) {
    const Function *F = I->getFunction();
    XDimDoesntResetWithinWaves = ST->hasWavefrontsEvenlySplittingXDim(*F);
  }
  using namespace llvm::PatternMatch;
  uint64_t C;
  if (match(V, m_LShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                      m_ConstantInt(C))) ||
      match(V, m_AShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                      m_ConstantInt(C)))) {
    return C >= ST->getWavefrontSizeLog2() && XDimDoesntResetWithinWaves;
  }

  Value *Mask;
  if (match(V, m_c_And(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                       m_Value(Mask)))) {
    return computeKnownBits(Mask, DL).countMinTrailingZeros() >=
               ST->getWavefrontSizeLog2() &&
           XDimDoesntResetWithinWaves;
  }

  const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
  if (!ExtValue)
    return false;

  const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
  if (!CI)
    return false;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      ArrayRef<unsigned> Indices = ExtValue->getIndices();
      return Indices.size() == 1 && Indices[0] == 1;
    }
    }
  }

  // If we have inline asm returning mixed SGPR and VGPR results, we inferred
  // divergent for the overall struct return. We need to override it in the
  // case we're extracting an SGPR component here.
  if (CI->isInlineAsm())
    return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());

  return false;
}

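// Intrinsics whose first operand is a flat pointer that InferAddressSpaces may
// rewrite; rewriteIntrinsicWithAddressSpace below performs the actual rewrite.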
bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
  switch (IID) {
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
  case Intrinsic::amdgcn_flat_atomic_fmax_num:
  case Intrinsic::amdgcn_flat_atomic_fmin_num:
  case Intrinsic::amdgcn_load_to_lds:
  case Intrinsic::amdgcn_make_buffer_rsrc:
    OpIndexes.push_back(0);
    return true;
  default:
    return false;
  }
}

Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                    Value *OldV,
                                                    Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = NewV->getType()->getContext();
    ConstantInt *NewVal = (TrueAS == NewAS) ?
      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
    return NewVal;
  }
  case Intrinsic::amdgcn_flat_atomic_fmax_num:
  case Intrinsic::amdgcn_flat_atomic_fmin_num: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    unsigned NewAS = SrcTy->getPointerAddressSpace();
    if (!AMDGPU::isExtendedGlobalAddrSpace(NewAS))
      return nullptr;
    Module *M = II->getModule();
    Function *NewDecl = Intrinsic::getOrInsertDeclaration(
        M, II->getIntrinsicID(), {DestTy, SrcTy, DestTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return II;
  }
  case Intrinsic::amdgcn_load_to_lds: {
    Type *SrcTy = NewV->getType();
    Module *M = II->getModule();
    Function *NewDecl =
        Intrinsic::getOrInsertDeclaration(M, II->getIntrinsicID(), {SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return II;
  }
  case Intrinsic::amdgcn_make_buffer_rsrc: {
    Type *SrcTy = NewV->getType();
    Type *DstTy = II->getType();
    Module *M = II->getModule();
    Function *NewDecl = Intrinsic::getOrInsertDeclaration(
        M, II->getIntrinsicID(), {DstTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return II;
  }
  default:
    return nullptr;
  }
}

InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *DstTy, VectorType *SrcTy,
                                           ArrayRef<int> Mask,
                                           TTI::TargetCostKind CostKind,
                                           int Index, VectorType *SubTp,
                                           ArrayRef<const Value *> Args,
                                           const Instruction *CxtI) const {
  if (!isa<FixedVectorType>(SrcTy))
    return BaseT::getShuffleCost(Kind, DstTy, SrcTy, Mask, CostKind, Index,
                                 SubTp);

  Kind = improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp);

  unsigned ScalarSize = DL.getTypeSizeInBits(SrcTy->getElementType());
  if (ST->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
      (ScalarSize == 16 || ScalarSize == 8)) {
    // Larger vector widths may require additional instructions, but are
    // typically cheaper than scalarized versions.
    //
    // We assume that shuffling at a register granularity can be done for free.
    // This is not true for vectors fed into memory instructions, but it is
    // effectively true for all other shuffling. The emphasis of the logic here
    // is to assist generic transform in cleaning up / canonicalizing those
    // shuffles.

    // With op_sel VOP3P instructions freely can access the low half or high
    // half of a register, so any swizzle of two elements is free.
    if (auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy)) {
      unsigned NumSrcElts = SrcVecTy->getNumElements();
      if (ST->hasVOP3PInsts() && ScalarSize == 16 && NumSrcElts == 2 &&
          (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Reverse ||
           Kind == TTI::SK_PermuteSingleSrc))
        return 0;
    }

    unsigned EltsPerReg = 32 / ScalarSize;
    switch (Kind) {
    case TTI::SK_Broadcast:
      // A single v_perm_b32 can be re-used for all destination registers.
      return 1;
    case TTI::SK_Reverse:
      // One instruction per register.
      if (auto *DstVecTy = dyn_cast<FixedVectorType>(DstTy))
        return divideCeil(DstVecTy->getNumElements(), EltsPerReg);
      break;
    case TTI::SK_ExtractSubvector:
      if (Index % EltsPerReg == 0)
        return 0; // Shuffling at register granularity
      if (auto *DstVecTy = dyn_cast<FixedVectorType>(DstTy))
        return divideCeil(DstVecTy->getNumElements(), EltsPerReg);
      break;
    case TTI::SK_InsertSubvector: {
      auto *DstVecTy = dyn_cast<FixedVectorType>(DstTy);
      if (!DstVecTy)
        break;
      unsigned NumDstElts = DstVecTy->getNumElements();
      unsigned NumInsertElts = cast<FixedVectorType>(SubTp)->getNumElements();
      unsigned EndIndex = Index + NumInsertElts;
      unsigned BeginSubIdx = Index % EltsPerReg;
      unsigned EndSubIdx = EndIndex % EltsPerReg;
      unsigned Cost = 0;

      if (BeginSubIdx != 0) {
        // Need to shift the inserted vector into place. The cost is the number
        // of destination registers overlapped by the inserted vector.
        Cost = divideCeil(EndIndex, EltsPerReg) - (Index / EltsPerReg);
      }

      // If the last register overlap is partial, there may be three source
      // registers feeding into it; that takes an extra instruction.
      if (EndIndex < NumDstElts && BeginSubIdx < EndSubIdx)
        Cost += 1;

      return Cost;
    }
    case TTI::SK_Splice: {
      auto *DstVecTy = dyn_cast<FixedVectorType>(DstTy);
      if (!DstVecTy)
        break;
      unsigned NumElts = DstVecTy->getNumElements();
      assert(NumElts == cast<FixedVectorType>(SrcTy)->getNumElements());
      // Determine the sub-region of the result vector that requires
      // sub-register shuffles / mixing.
      unsigned EltsFromLHS = NumElts - Index;
      bool LHSIsAligned = (Index % EltsPerReg) == 0;
      bool RHSIsAligned = (EltsFromLHS % EltsPerReg) == 0;
      if (LHSIsAligned && RHSIsAligned)
        return 0;
      if (LHSIsAligned && !RHSIsAligned)
        return divideCeil(NumElts, EltsPerReg) - (EltsFromLHS / EltsPerReg);
      if (!LHSIsAligned && RHSIsAligned)
        return divideCeil(EltsFromLHS, EltsPerReg);
      return divideCeil(NumElts, EltsPerReg);
    }
    default:
      break;
    }

    if (!Mask.empty()) {
      unsigned NumSrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();

      // Generically estimate the cost by assuming that each destination
      // register is derived from sources via v_perm_b32 instructions if it
      // can't be copied as-is.
      //
      // For each destination register, derive the cost of obtaining it based
      // on the number of source registers that feed into it.
      unsigned Cost = 0;
      for (unsigned DstIdx = 0; DstIdx < Mask.size(); DstIdx += EltsPerReg) {
        SmallVector<int, 4> Regs;
        bool Aligned = true;
        for (unsigned I = 0; I < EltsPerReg && DstIdx + I < Mask.size(); ++I) {
          int SrcIdx = Mask[DstIdx + I];
          if (SrcIdx == -1)
            continue;
          int Reg;
          if (SrcIdx < (int)NumSrcElts) {
            Reg = SrcIdx / EltsPerReg;
            if (SrcIdx % EltsPerReg != I)
              Aligned = false;
          } else {
            Reg = NumSrcElts + (SrcIdx - NumSrcElts) / EltsPerReg;
            if ((SrcIdx - NumSrcElts) % EltsPerReg != I)
              Aligned = false;
          }
          if (!llvm::is_contained(Regs, Reg))
            Regs.push_back(Reg);
        }
        if (Regs.size() >= 2)
          Cost += Regs.size() - 1;
        else if (!Aligned)
          Cost += 1;
      }
      return Cost;
    }
  }

  return BaseT::getShuffleCost(Kind, DstTy, SrcTy, Mask, CostKind, Index,
                               SubTp);
}

/// Whether it is profitable to sink the operands of an
/// Instruction I to the basic block of I.
/// This helps using several modifiers (like abs and neg) more often.
bool GCNTTIImpl::isProfitableToSinkOperands(Instruction *I,
                                            SmallVectorImpl<Use *> &Ops) const {
  using namespace PatternMatch;

  for (auto &Op : I->operands()) {
    // Ensure we are not already sinking this operand.
    if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
      continue;

    if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
      Ops.push_back(&Op);
  }

  return !Ops.empty();
}

bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const GCNSubtarget *CallerST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
  const GCNSubtarget *CalleeST
    = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));

  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  // FIXME: dx10_clamp can just take the caller setting, but there seems to be
  // no way to support merge for backend defined attributes.
  SIModeRegisterDefaults CallerMode(*Caller, *CallerST);
  SIModeRegisterDefaults CalleeMode(*Callee, *CalleeST);
  if (!CallerMode.isInlineCompatible(CalleeMode))
    return false;

  if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
      Callee->hasFnAttribute(Attribute::InlineHint))
    return true;

  // Hack to make compile times reasonable.
  if (InlineMaxBB) {
    // A single BB does not increase the total BB count.
    if (Callee->size() == 1)
      return true;
    size_t BBSize = Caller->size() + Callee->size() - 1;
    return BBSize <= InlineMaxBB;
  }

  return true;
}

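// Estimate how many SGPRs and VGPRs the call arguments occupy; once they no
// longer fit in the registers available for argument passing, raise the inline
// threshold to account for the cost of passing them on the stack.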
static unsigned adjustInliningThresholdUsingCallee(const CallBase *CB,
                                                   const SITargetLowering *TLI,
                                                   const GCNTTIImpl *TTIImpl) {
  const int NrOfSGPRUntilSpill = 26;
  const int NrOfVGPRUntilSpill = 32;

  const DataLayout &DL = TTIImpl->getDataLayout();

  unsigned adjustThreshold = 0;
  int SGPRsInUse = 0;
  int VGPRsInUse = 0;
  for (const Use &A : CB->args()) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, DL, A.get()->getType(), ValueVTs);
    for (auto ArgVT : ValueVTs) {
      unsigned CCRegNum = TLI->getNumRegistersForCallingConv(
          CB->getContext(), CB->getCallingConv(), ArgVT);
      if (AMDGPU::isArgPassedInSGPR(CB, CB->getArgOperandNo(&A)))
        SGPRsInUse += CCRegNum;
      else
        VGPRsInUse += CCRegNum;
    }
  }

  // The cost of passing function arguments through the stack:
  //  1 instruction to put a function argument on the stack in the caller.
  //  1 instruction to take a function argument from the stack in the callee.
  //  1 instruction is explicitly needed to take care of data dependencies in
  //  the callee function.
  InstructionCost ArgStackCost(1);
  ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
      Instruction::Store, Type::getInt32Ty(CB->getContext()), Align(4),
      AMDGPUAS::PRIVATE_ADDRESS, TTI::TCK_SizeAndLatency);
  ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
      Instruction::Load, Type::getInt32Ty(CB->getContext()), Align(4),
      AMDGPUAS::PRIVATE_ADDRESS, TTI::TCK_SizeAndLatency);

  // The penalty cost is computed relative to the cost of instructions and does
  // not model any storage costs.
  adjustThreshold += std::max(0, SGPRsInUse - NrOfSGPRUntilSpill) *
                     ArgStackCost.getValue() * InlineConstants::getInstrCost();
  adjustThreshold += std::max(0, VGPRsInUse - NrOfVGPRUntilSpill) *
                     ArgStackCost.getValue() * InlineConstants::getInstrCost();
  return adjustThreshold;
}

static unsigned getCallArgsTotalAllocaSize(const CallBase *CB,
                                           const DataLayout &DL) {
  // If we have a pointer to a private array passed into a function
  // it will not be optimized out, leaving scratch usage.
  // This function calculates the total size in bytes of the memory that would
  // end up in scratch if the call was not inlined.
  unsigned AllocaSize = 0;
  SmallPtrSet<const AllocaInst *, 8> AIVisited;
  for (Value *PtrArg : CB->args()) {
    PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
    if (!Ty)
      continue;

    unsigned AddrSpace = Ty->getAddressSpace();
    if (AddrSpace != AMDGPUAS::FLAT_ADDRESS &&
        AddrSpace != AMDGPUAS::PRIVATE_ADDRESS)
      continue;

    const AllocaInst *AI = dyn_cast<AllocaInst>(getUnderlyingObject(PtrArg));
    if (!AI || !AI->isStaticAlloca() || !AIVisited.insert(AI).second)
      continue;

    AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
  }
  return AllocaSize;
}

unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
  unsigned Threshold = adjustInliningThresholdUsingCallee(CB, TLI, this);

  // Private objects passed as arguments may end up in scratch usage if the
  // call is not inlined. Increase the inline threshold to promote inlining.
  unsigned AllocaSize = getCallArgsTotalAllocaSize(CB, DL);
  if (AllocaSize > 0)
    Threshold += ArgAllocaCost;
  return Threshold;
}

unsigned GCNTTIImpl::getCallerAllocaCost(const CallBase *CB,
                                         const AllocaInst *AI) const {

  // Below the cutoff, assume that the private memory objects would be
  // optimized.
  auto AllocaSize = getCallArgsTotalAllocaSize(CB, DL);
  if (AllocaSize <= ArgAllocaCutoff)
    return 0;

  // Above the cutoff, we give a cost to each private memory object
  // depending on its size. If the array can be optimized by SROA this cost is
  // not added to the total-cost in the inliner cost analysis.
  //
  // We choose the total cost of the allocas such that their sum cancels the
  // bonus given in the threshold (ArgAllocaCost).
  //
  //   Cost_Alloca_0 + ... + Cost_Alloca_N == ArgAllocaCost
  //
  // Awkwardly, the ArgAllocaCost bonus is multiplied by the threshold
  // multiplier, the single-bb bonus and the vector-bonus.
  //
  // We compensate the first two multipliers by repeating logic from the
  // inliner-cost in here. The vector-bonus is 0 on AMDGPU.
  static_assert(InlinerVectorBonusPercent == 0, "vector bonus assumed to be 0");
  unsigned Threshold = ArgAllocaCost * getInliningThresholdMultiplier();

  bool SingleBB = none_of(*CB->getCalledFunction(), [](const BasicBlock &BB) {
    return BB.getTerminator()->getNumSuccessors() > 1;
  });
  if (SingleBB) {
    Threshold += Threshold / 2;
  }

  auto ArgAllocaSize = DL.getTypeAllocSize(AI->getAllocatedType());

  // Attribute the bonus proportionally to the alloca size.
  unsigned AllocaThresholdBonus = (Threshold * ArgAllocaSize) / AllocaSize;

  return AllocaThresholdBonus;
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP,
                                         OptimizationRemarkEmitter *ORE) const {
  CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
}

void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) const {
  CommonTTI.getPeelingPreferences(L, SE, PP);
}

int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
  return ST->hasFullRate64Ops()
             ? getFullRateInstrCost()
             : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
                                      : getQuarterRateInstrCost(CostKind);
}

std::pair<InstructionCost, MVT>
GCNTTIImpl::getTypeLegalizationCost(Type *Ty) const {
  std::pair<InstructionCost, MVT> Cost = BaseT::getTypeLegalizationCost(Ty);
  auto Size = DL.getTypeSizeInBits(Ty);
  // Maximum load or store can handle 8 dwords for scalar and 4 for
  // vector ALU. Let's assume anything above 8 dwords is expensive
  // even if legal.
  if (Size <= 256)
    return Cost;

  Cost.first += (Size + 255) / 256;
  return Cost;
}

unsigned GCNTTIImpl::getPrefetchDistance() const {
  return ST->hasPrefetch() ? 128 : 0;
}

bool GCNTTIImpl::shouldPrefetchAddressSpace(unsigned AS) const {
  return AMDGPU::isFlatGlobalAddrSpace(AS);
}

void GCNTTIImpl::collectKernelLaunchBounds(
    const Function &F,
    SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {
  SmallVector<unsigned> MaxNumWorkgroups = ST->getMaxNumWorkGroups(F);
  LB.push_back({"amdgpu-max-num-workgroups[0]", MaxNumWorkgroups[0]});
  LB.push_back({"amdgpu-max-num-workgroups[1]", MaxNumWorkgroups[1]});
  LB.push_back({"amdgpu-max-num-workgroups[2]", MaxNumWorkgroups[2]});
  std::pair<unsigned, unsigned> FlatWorkGroupSize =
      ST->getFlatWorkGroupSizes(F);
  LB.push_back({"amdgpu-flat-work-group-size[0]", FlatWorkGroupSize.first});
  LB.push_back({"amdgpu-flat-work-group-size[1]", FlatWorkGroupSize.second});
  std::pair<unsigned, unsigned> WavesPerEU = ST->getWavesPerEU(F);
  LB.push_back({"amdgpu-waves-per-eu[0]", WavesPerEU.first});
  LB.push_back({"amdgpu-waves-per-eu[1]", WavesPerEU.second});
}

// Determine whether IEEE mode is known to be enabled or disabled for the
// function containing \p I, based on the subtarget and the "amdgpu-ieee"
// attribute; shaders default to IEEE mode off, other functions to on.
GCNTTIImpl::KnownIEEEMode
GCNTTIImpl::fpenvIEEEMode(const Instruction &I) const {
  if (!ST->hasIEEEMode()) // Only mode on gfx12
    return KnownIEEEMode::On;

  const Function *F = I.getFunction();
  if (!F)
    return KnownIEEEMode::Unknown;

  Attribute IEEEAttr = F->getFnAttribute("amdgpu-ieee");
  if (IEEEAttr.isValid())
    return IEEEAttr.getValueAsBool() ? KnownIEEEMode::On : KnownIEEEMode::Off;

  return AMDGPU::isShader(F->getCallingConv()) ? KnownIEEEMode::Off
                                               : KnownIEEEMode::On;
}

InstructionCost GCNTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            Align Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            TTI::OperandValueInfo OpInfo,
                                            const Instruction *I) const {
  if (VectorType *VecTy = dyn_cast<VectorType>(Src)) {
    if ((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
        VecTy->getElementType()->isIntegerTy(8)) {
      return divideCeil(DL.getTypeSizeInBits(VecTy) - 1,
                        getLoadStoreVecRegBitWidth(AddressSpace));
    }
  }
  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}

unsigned GCNTTIImpl::getNumberOfParts(Type *Tp) const {
  if (VectorType *VecTy = dyn_cast<VectorType>(Tp)) {
    if (VecTy->getElementType()->isIntegerTy(8)) {
      unsigned ElementCount = VecTy->getElementCount().getFixedValue();
      return divideCeil(ElementCount - 1, 4);
    }
  }
  return BaseT::getNumberOfParts(Tp);
}

InstructionUniformity
GCNTTIImpl::getInstructionUniformity(const Value *V) const {
  if (isAlwaysUniform(V))
    return InstructionUniformity::AlwaysUniform;

  if (isSourceOfDivergence(V))
    return InstructionUniformity::NeverUniform;

  return InstructionUniformity::Default;
}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
Provides AMDGPU specific target descriptions.
Rewrite undef for PHI
The AMDGPU TargetMachine interface definition for hw codegen targets.
static cl::opt< unsigned > UnrollThresholdIf("amdgpu-unroll-threshold-if", cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"), cl::init(200), cl::Hidden)
static cl::opt< unsigned > ArgAllocaCost("amdgpu-inline-arg-alloca-cost", cl::Hidden, cl::init(4000), cl::desc("Cost of alloca argument"))
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond, unsigned Depth=0)
static cl::opt< bool > UnrollRuntimeLocal("amdgpu-unroll-runtime-local", cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"), cl::init(true), cl::Hidden)
static unsigned adjustInliningThresholdUsingCallee(const CallBase *CB, const SITargetLowering *TLI, const GCNTTIImpl *TTIImpl)
static cl::opt< unsigned > ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden, cl::init(256), cl::desc("Maximum alloca size to use for inline cost"))
static cl::opt< size_t > InlineMaxBB("amdgpu-inline-max-bb", cl::Hidden, cl::init(1100), cl::desc("Maximum number of BBs allowed in a function after inlining" " (compile time constraint)"))
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID)
static cl::opt< unsigned > UnrollMaxBlockToAnalyze("amdgpu-unroll-max-block-to-analyze", cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"), cl::init(32), cl::Hidden)
static unsigned getCallArgsTotalAllocaSize(const CallBase *CB, const DataLayout &DL)
static cl::opt< unsigned > UnrollThresholdPrivate("amdgpu-unroll-threshold-private", cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"), cl::init(2700), cl::Hidden)
static cl::opt< unsigned > MemcpyLoopUnroll("amdgpu-memcpy-loop-unroll", cl::desc("Unroll factor (affecting 4x32-bit operations) to use for memory " "operations when lowering memcpy as a loop"), cl::init(16), cl::Hidden)
static cl::opt< unsigned > UnrollThresholdLocal("amdgpu-unroll-threshold-local", cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"), cl::init(1000), cl::Hidden)
This file a TargetTransformInfoImplBase conforming object specific to the AMDGPU target machine.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
Hexagon Common GEP
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
static LVOptions Options
Definition LVOptions.cpp:25
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static unsigned getNumElements(Type *Ty)
#define LLVM_DEBUG(...)
Definition Debug.h:114
std::optional< unsigned > getReqdWorkGroupSize(const Function &F, unsigned Dim) const
bool hasWavefrontsEvenlySplittingXDim(const Function &F, bool REquiresUniformYZ=false) const
uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override
AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
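For orientation, this is how a mid-end pass typically reaches the getUnrollingPreferences/getPeelingPreferences overrides listed above. A sketch of assumed consumer-side usage through the public TargetTransformInfo wrapper, not code from this file:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

static void queryUnrollHints(Loop &L, ScalarEvolution &SE,
                             const TargetTransformInfo &TTI,
                             OptimizationRemarkEmitter &ORE) {
  // Real callers seed UP/PP with generic defaults before asking the target.
  TargetTransformInfo::UnrollingPreferences UP;
  TargetTransformInfo::PeelingPreferences PP;
  TTI.getUnrollingPreferences(&L, SE, UP, &ORE); // dispatches to the AMDGPU override
  TTI.getPeelingPreferences(&L, SE, PP);
  (void)UP.Threshold; // e.g. raised when private or local memory is used
  (void)PP.PeelCount;
}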
An instruction to allocate memory on the stack.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
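The AllocaInst queries above feed the alloca-driven inlining heuristics documented on this page. A small sketch of sizing one with DataLayout, under the assumption that only entry-block, constant-sized allocas are of interest (array allocas would additionally scale by their element count, omitted here):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include <cstdint>

using namespace llvm;

static uint64_t staticAllocaSizeInBytes(const AllocaInst &AI,
                                        const DataLayout &DL) {
  // Dynamic or non-entry-block allocas are skipped; they cannot be promoted
  // to registers, so counting them would not help the heuristic.
  if (!AI.isStaticAlloca())
    return 0;
  return DL.getTypeAllocSize(AI.getAllocatedType()).getFixedValue();
}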
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Functions, function parameters, and return types can have attributes to indicate how they should be treated by optimizations and code generation.
Definition Attributes.h:69
LLVM_ABI bool getValueAsBool() const
Return the attribute's value as a boolean.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
unsigned getNumberOfParts(Type *Tp) const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
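The cost hooks listed above are reached through the public TargetTransformInfo interface. A sketch of assumed consumer-side usage; the types and cost kind are illustrative, not taken from this file:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

static InstructionCost packedHalfCost(const TargetTransformInfo &TTI,
                                      LLVMContext &Ctx) {
  // Packed 16-bit math is where the AMDGPU overrides diverge most from the
  // generic cost model.
  auto *V2F16 = FixedVectorType::get(Type::getHalfTy(Ctx), 2);
  auto Kind = TargetTransformInfo::TCK_RecipThroughput;
  return TTI.getArithmeticInstrCost(Instruction::FAdd, V2F16, Kind) +
         TTI.getArithmeticInstrCost(Instruction::FMul, V2F16, Kind);
}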
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
CallingConv::ID getCallingConv() const
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned getArgOperandNo(const Use *U) const
Given a use for an arg operand, get the arg operand number that corresponds to it.
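As a sketch of the call-site argument APIs above, here is a hypothetical helper, loosely in the spirit of the alloca-based inline-cost adjustment but not the code in this file:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static unsigned countAllocaBackedPtrArgs(const CallBase &CB) {
  unsigned N = 0;
  for (const Use &U : CB.args()) {
    const Value *Arg = U.get();
    if (!Arg->getType()->isPointerTy())
      continue;
    // getUnderlyingObject (documented further down) strips GEPs and casts.
    if (isa<AllocaInst>(getUnderlyingObject(Arg)))
      ++N;
  }
  return N;
}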
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the type of this constant.
Definition Constants.h:174
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
ArrayRef< unsigned > getIndices() const
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Container class for subtarget features.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:802
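FixedVectorType::get is how the memcpy-lowering hooks on this page build their wide copy types. A sketch with an illustrative unroll factor, not the file's actual computation:

#include "llvm/IR/DerivedTypes.h"

// Builds a <UnrollFactor*4 x i32> vector, i.e. UnrollFactor 4x32-bit
// operations per iteration of the lowered memcpy loop.
static llvm::Type *memcpyChunkTy(llvm::LLVMContext &Ctx,
                                 unsigned UnrollFactor) {
  return llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx),
                                    UnrollFactor * 4);
}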
bool hasGloballyAddressableScratch() const
bool hasFullRate64Ops() const
GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
Account for loads of i8 vector types to have reduced cost.
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const override
bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const override
bool isInlineAsmSourceOfDivergence(const CallInst *CI, ArrayRef< unsigned > Indices={}) const
Analyze if the results of inline asm are divergent.
bool isReadRegisterSourceOfDivergence(const IntrinsicInst *ReadReg) const
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override
unsigned getNumberOfRegisters(unsigned RCID) const override
bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const override
unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const override
bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
bool shouldPrefetchAddressSpace(unsigned AS) const override
bool hasBranchDivergence(const Function *F=nullptr) const override
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
unsigned getInliningThresholdMultiplier() const override
unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const override
unsigned getPrefetchDistance() const override
How much before a load we should place the prefetch instruction.
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
KnownIEEEMode fpenvIEEEMode(const Instruction &I) const
Return KnownIEEEMode::On if we know the use context can assume "amdgpu-ieee"="true", and KnownIEEEMode::Off if we can assume "amdgpu-ieee"="false".
unsigned adjustInliningThreshold(const CallBase *CB) const override
bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Whether it is profitable to sink the operands of an Instruction I to the basic block of I.
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
int getInliningLastCallToStaticBonus() const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *ValTy, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
unsigned getNumberOfParts(Type *Tp) const override
When counting parts on AMD GPUs, account for i8s being grouped together under a single i32 value.
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
unsigned getMinVectorRegisterBitWidth() const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const override
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize) const override
uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override
InstructionUniformity getInstructionUniformity(const Value *V) const override
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
static InstructionCost getInvalid(CostType Val=0)
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full range of operator support required for arithmetic and comparisons.
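A minimal sketch of the usual consumption pattern for InstructionCost: check validity (costs of unsupported operations come back as getInvalid()) before extracting the raw value:

#include "llvm/Support/InstructionCost.h"
#include <cstdint>
#include <optional>

static std::optional<int64_t> asRawCost(llvm::InstructionCost C) {
  if (!C.isValid())
    return std::nullopt; // e.g. an illegal or unsupported type was costed
  return C.getValue();   // safe only after the validity check
}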
LLVM_ABI bool hasApproxFunc() const LLVM_READONLY
Determine whether the approximate-math-functions flag is set.
LLVM_ABI bool hasAllowContract() const LLVM_READONLY
Determine whether the allow-contract flag is set.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
const IntrinsicInst * getInst() const
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Metadata node.
Definition Metadata.h:1078
Machine Value Type.
static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
The optimization diagnostic interface.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
The main scalar evolution driver.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
std::vector< AsmOperandInfo > AsmOperandInfoVector
Primary interface to the complete machine description for the target machine.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInfo-derived member variable.
virtual const DataLayout & getDataLayout() const
virtual void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const
TargetCostKind
The kind of cost model.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of FastMathFlags FMF.
@ TCC_Free
Expected to fold away in lowering.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
Definition TypeSize.h:346
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:297
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
static LLVM_ABI IntegerType * getInt16Ty(LLVMContext &C)
Definition Type.cpp:295
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Value * getOperand(unsigned i) const
Definition User.h:233
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
user_iterator user_begin()
Definition Value.h:402
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
Base class of all SIMD vector types.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ BUFFER_STRIDED_POINTER
Address space for 192-bit fat buffer pointers with an additional index.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ BUFFER_FAT_POINTER
Address space for 160-bit buffer fat pointers.
@ PRIVATE_ADDRESS
Address space for private memory.
@ BUFFER_RESOURCE
Address space for 128-bit buffer resources.
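A small sketch using the address-space enumerators above (the header path is an assumption; inside the backend they are available through the AMDGPUAS namespace):

#include "llvm/Support/AMDGPUAddrSpace.h"

using namespace llvm;

// Hypothetical predicate: LDS, global and constant memory can never alias a
// thread's private (scratch) allocation.
static bool isKnownNotScratch(unsigned AS) {
  return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}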
LLVM_READNONE constexpr bool isShader(CallingConv::ID CC)
bool isFlatGlobalAddrSpace(unsigned AS)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
bool isExtendedGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types and value types.
Definition ISDOpcodes.h:24
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:764
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:738
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
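The PatternMatch helpers above are the usual way target code recognises small instruction trees. A sketch built around a hypothetical predicate, not one of this file's actual patterns:

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Returns the operand X if V is "fneg (fabs X)" (either fneg form), else null.
static Value *matchNegatedFabs(Value *V) {
  Value *X;
  if (match(V, m_FNeg(m_FAbs(m_Value(X)))))
    return X;
  return nullptr;
}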
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual underlying non-aggregate types that comprise it.
Definition Analysis.cpp:119
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI MDNode * findOptionMDForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for a loop.
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2163
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
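divideCeil is the usual helper for part- and register-counting questions; for instance (illustrative values):

#include "llvm/Support/MathExtras.h"

// How many 32-bit registers an N-bit value occupies, rounding up;
// e.g. divideCeil(48, 32) == 2.
static unsigned numDWords(unsigned Bits) {
  return llvm::divideCeil(Bits, 32u);
}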
@ FAdd
Sum of floats.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address intrinsics from the specified value, returning the original object being addressed.
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ NeverUniform
The result values can never be assumed to be uniform.
Definition Uniformity.h:26
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
static constexpr DenormalMode getPreserveSign()
Extended Value Type.
Definition ValueTypes.h:35
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
Information about a load/store intrinsic defined by the target.
bool isInlineCompatible(SIModeRegisterDefaults CalleeMode) const
Parameters that control the generic loop unrolling transformation.
unsigned Threshold
The cost threshold for the unrolled loop.
bool UnrollVectorizedLoop
Don't disable runtime unroll for the loops which were vectorized.
unsigned MaxIterationsCountToAnalyze
Don't allow loop unrolling to simulate more than this number of iterations when checking full unroll profitability.
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set to UINT_MAX to disable).
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number of loop iterations is not known at compile time).
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminate compare and branch overhead).
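Taken together, these fields are what a getUnrollingPreferences override fills in. A sketch of a hypothetical hook body; the numbers are made up and are not the AMDGPU defaults:

#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

static void seedUnrollPrefs(TargetTransformInfo::UnrollingPreferences &UP,
                            bool LoopTouchesLocalMemory) {
  UP.Threshold = 300;                  // cost budget for full unrolling
  UP.PartialThreshold = UP.Threshold;  // budget for partial/runtime unrolling
  UP.Partial = true;                   // allow partial unrolling
  UP.Runtime = LoopTouchesLocalMemory; // runtime unroll only when it pays off
  UP.MaxIterationsCountToAnalyze = 32; // cap full-unroll simulation work
}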