LLVM 20.0.0git
AMDGPUTargetTransformInfo.cpp
Go to the documentation of this file.
1//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// \file
10// This file implements a TargetTransformInfo analysis pass specific to the
11// AMDGPU target machine. It uses the target's detailed information to provide
12// more precise answers to certain TTI queries, while letting the target
13// independent and default TTI implementations handle the rest.
14//
15//===----------------------------------------------------------------------===//
16
18#include "AMDGPUTargetMachine.h"
25#include "llvm/IR/IRBuilder.h"
26#include "llvm/IR/IntrinsicsAMDGPU.h"
29#include <optional>
30
31using namespace llvm;
32
33#define DEBUG_TYPE "AMDGPUtti"
34
36 "amdgpu-unroll-threshold-private",
37 cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
38 cl::init(2700), cl::Hidden);
39
41 "amdgpu-unroll-threshold-local",
42 cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
43 cl::init(1000), cl::Hidden);
44
46 "amdgpu-unroll-threshold-if",
47 cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
48 cl::init(200), cl::Hidden);
49
51 "amdgpu-unroll-runtime-local",
52 cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
53 cl::init(true), cl::Hidden);
54
56 "amdgpu-unroll-max-block-to-analyze",
57 cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
58 cl::init(32), cl::Hidden);
59
60static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
61 cl::Hidden, cl::init(4000),
62 cl::desc("Cost of alloca argument"));
63
64// If the amount of scratch memory to eliminate exceeds our ability to allocate
65// it into registers we gain nothing by aggressively inlining functions for that
66// heuristic.
68 ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
69 cl::init(256),
70 cl::desc("Maximum alloca size to use for inline cost"));
71
72// Inliner constraint to achieve reasonable compilation time.
74 "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
75 cl::desc("Maximum number of BBs allowed in a function after inlining"
76 " (compile time constraint)"));
77
78// This default unroll factor is based on microbenchmarks on gfx1030.
80 "amdgpu-memcpy-loop-unroll",
81 cl::desc("Unroll factor (affecting 4x32-bit operations) to use for memory "
82 "operations when lowering memcpy as a loop"),
83 cl::init(16), cl::Hidden);
84
85static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
86 unsigned Depth = 0) {
87 const Instruction *I = dyn_cast<Instruction>(Cond);
88 if (!I)
89 return false;
90
91 for (const Value *V : I->operand_values()) {
92 if (!L->contains(I))
93 continue;
94 if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
95 if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
96 return SubLoop->contains(PHI); }))
97 return true;
98 } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
99 return true;
100 }
101 return false;
102}
103
105 : BaseT(TM, F.getDataLayout()),
106 TargetTriple(TM->getTargetTriple()),
107 ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
108 TLI(ST->getTargetLowering()) {}
109
113 const Function &F = *L->getHeader()->getParent();
114 UP.Threshold =
115 F.getFnAttributeAsParsedInteger("amdgpu-unroll-threshold", 300);
116 UP.MaxCount = std::numeric_limits<unsigned>::max();
117 UP.Partial = true;
118
119 // Conditional branch in a loop back edge needs 3 additional exec
120 // manipulations in average.
121 UP.BEInsns += 3;
122
123 // We want to run unroll even for the loops which have been vectorized.
124 UP.UnrollVectorizedLoop = true;
125
126 // TODO: Do we want runtime unrolling?
127
128 // Maximum alloca size than can fit registers. Reserve 16 registers.
129 const unsigned MaxAlloca = (256 - 16) * 4;
130 unsigned ThresholdPrivate = UnrollThresholdPrivate;
131 unsigned ThresholdLocal = UnrollThresholdLocal;
132
133 // If this loop has the amdgpu.loop.unroll.threshold metadata we will use the
134 // provided threshold value as the default for Threshold
135 if (MDNode *LoopUnrollThreshold =
136 findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
137 if (LoopUnrollThreshold->getNumOperands() == 2) {
138 ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
139 LoopUnrollThreshold->getOperand(1));
140 if (MetaThresholdValue) {
141 // We will also use the supplied value for PartialThreshold for now.
142 // We may introduce additional metadata if it becomes necessary in the
143 // future.
144 UP.Threshold = MetaThresholdValue->getSExtValue();
146 ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
147 ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
148 }
149 }
150 }
151
152 unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
153 for (const BasicBlock *BB : L->getBlocks()) {
154 const DataLayout &DL = BB->getDataLayout();
155 unsigned LocalGEPsSeen = 0;
156
157 if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
158 return SubLoop->contains(BB); }))
159 continue; // Block belongs to an inner loop.
160
161 for (const Instruction &I : *BB) {
162 // Unroll a loop which contains an "if" statement whose condition
163 // defined by a PHI belonging to the loop. This may help to eliminate
164 // if region and potentially even PHI itself, saving on both divergence
165 // and registers used for the PHI.
166 // Add a small bonus for each of such "if" statements.
167 if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
168 if (UP.Threshold < MaxBoost && Br->isConditional()) {
169 BasicBlock *Succ0 = Br->getSuccessor(0);
170 BasicBlock *Succ1 = Br->getSuccessor(1);
171 if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
172 (L->contains(Succ1) && L->isLoopExiting(Succ1)))
173 continue;
174 if (dependsOnLocalPhi(L, Br->getCondition())) {
176 LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
177 << " for loop:\n"
178 << *L << " due to " << *Br << '\n');
179 if (UP.Threshold >= MaxBoost)
180 return;
181 }
182 }
183 continue;
184 }
185
186 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
187 if (!GEP)
188 continue;
189
190 unsigned AS = GEP->getAddressSpace();
191 unsigned Threshold = 0;
193 Threshold = ThresholdPrivate;
195 Threshold = ThresholdLocal;
196 else
197 continue;
198
199 if (UP.Threshold >= Threshold)
200 continue;
201
202 if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
203 const Value *Ptr = GEP->getPointerOperand();
204 const AllocaInst *Alloca =
205 dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
206 if (!Alloca || !Alloca->isStaticAlloca())
207 continue;
208 Type *Ty = Alloca->getAllocatedType();
209 unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
210 if (AllocaSize > MaxAlloca)
211 continue;
212 } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
214 LocalGEPsSeen++;
215 // Inhibit unroll for local memory if we have seen addressing not to
216 // a variable, most likely we will be unable to combine it.
217 // Do not unroll too deep inner loops for local memory to give a chance
218 // to unroll an outer loop for a more important reason.
219 if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
220 (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
221 !isa<Argument>(GEP->getPointerOperand())))
222 continue;
223 LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
224 << *L << " due to LDS use.\n");
226 }
227
228 // Check if GEP depends on a value defined by this loop itself.
229 bool HasLoopDef = false;
230 for (const Value *Op : GEP->operands()) {
231 const Instruction *Inst = dyn_cast<Instruction>(Op);
232 if (!Inst || L->isLoopInvariant(Op))
233 continue;
234
235 if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
236 return SubLoop->contains(Inst); }))
237 continue;
238 HasLoopDef = true;
239 break;
240 }
241 if (!HasLoopDef)
242 continue;
243
244 // We want to do whatever we can to limit the number of alloca
245 // instructions that make it through to the code generator. allocas
246 // require us to use indirect addressing, which is slow and prone to
247 // compiler bugs. If this loop does an address calculation on an
248 // alloca ptr, then we want to use a higher than normal loop unroll
249 // threshold. This will give SROA a better chance to eliminate these
250 // allocas.
251 //
252 // We also want to have more unrolling for local memory to let ds
253 // instructions with different offsets combine.
254 //
255 // Don't use the maximum allowed value here as it will make some
256 // programs way too big.
257 UP.Threshold = Threshold;
258 LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
259 << " for loop:\n"
260 << *L << " due to " << *GEP << '\n');
261 if (UP.Threshold >= MaxBoost)
262 return;
263 }
264
265 // If we got a GEP in a small BB from inner loop then increase max trip
266 // count to analyze for better estimation cost in unroll
267 if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
269 }
270}
271
275}
276
278 return 1024;
279}
280
281const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
282 // Codegen control options which don't matter.
283 AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
284 AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
285 AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
286 AMDGPU::FeatureUnalignedAccessMode,
287
288 AMDGPU::FeatureAutoWaitcntBeforeBarrier,
289
290 // Property of the kernel/environment which can't actually differ.
291 AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
292 AMDGPU::FeatureTrapHandler,
293
294 // The default assumption needs to be ecc is enabled, but no directly
295 // exposed operations depend on it, so it can be safely inlined.
296 AMDGPU::FeatureSRAMECC,
297
298 // Perf-tuning features
299 AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};
300
302 : BaseT(TM, F.getDataLayout()),
303 ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
304 TLI(ST->getTargetLowering()), CommonTTI(TM, F),
305 IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
306 SIModeRegisterDefaults Mode(F, *ST);
307 HasFP32Denormals = Mode.FP32Denormals != DenormalMode::getPreserveSign();
308 HasFP64FP16Denormals =
309 Mode.FP64FP16Denormals != DenormalMode::getPreserveSign();
310}
311
313 return !F || !ST->isSingleLaneExecution(*F);
314}
315
316unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
317 // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
318 // registers. See getRegisterClassForType for the implementation.
319 // In this case vector registers are not vector in terms of
320 // VGPRs, but those which can hold multiple values.
321
322 // This is really the number of registers to fill when vectorizing /
323 // interleaving loops, so we lie to avoid trying to use all registers.
324 return 4;
325}
326
329 switch (K) {
331 return TypeSize::getFixed(32);
333 return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
335 return TypeSize::getScalable(0);
336 }
337 llvm_unreachable("Unsupported register kind");
338}
339
341 return 32;
342}
343
344unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
345 if (Opcode == Instruction::Load || Opcode == Instruction::Store)
346 return 32 * 4 / ElemWidth;
347 return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
348 : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
349 : 1;
350}
351
352unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
353 unsigned ChainSizeInBytes,
354 VectorType *VecTy) const {
355 unsigned VecRegBitWidth = VF * LoadSize;
356 if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
357 // TODO: Support element-size less than 32bit?
358 return 128 / LoadSize;
359
360 return VF;
361}
362
363unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
364 unsigned ChainSizeInBytes,
365 VectorType *VecTy) const {
366 unsigned VecRegBitWidth = VF * StoreSize;
367 if (VecRegBitWidth > 128)
368 return 128 / StoreSize;
369
370 return VF;
371}
372
373unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
374 if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
375 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
377 AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
378 AddrSpace == AMDGPUAS::BUFFER_RESOURCE ||
380 return 512;
381 }
382
383 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
384 return 8 * ST->getMaxPrivateElementSize();
385
386 // Common to flat, global, local and region. Assume for unknown addrspace.
387 return 128;
388}
389
390bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
391 Align Alignment,
392 unsigned AddrSpace) const {
393 // We allow vectorization of flat stores, even though we may need to decompose
394 // them later if they may access private memory. We don't have enough context
395 // here, and legalization can handle it.
396 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
397 return (Alignment >= 4 || ST->hasUnalignedScratchAccessEnabled()) &&
398 ChainSizeInBytes <= ST->getMaxPrivateElementSize();
399 }
400 return true;
401}
402
403bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
404 Align Alignment,
405 unsigned AddrSpace) const {
406 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
407}
408
409bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
410 Align Alignment,
411 unsigned AddrSpace) const {
412 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
413}
414
416 return 1024;
417}
418
419// FIXME: Should we use narrower types for local/region, or account for when
420// unaligned access is legal?
422 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
423 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
424 std::optional<uint32_t> AtomicElementSize) const {
425
426 if (AtomicElementSize)
427 return Type::getIntNTy(Context, *AtomicElementSize * 8);
428
429 Align MinAlign = std::min(SrcAlign, DestAlign);
430
431 // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
432 // hardware into byte accesses. If you assume all alignments are equally
433 // probable, it's more efficient on average to use short accesses for this
434 // case.
435 if (MinAlign == Align(2))
436 return Type::getInt16Ty(Context);
437
438 // Not all subtargets have 128-bit DS instructions, and we currently don't
439 // form them by default.
440 if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
441 SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
442 DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
443 DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
444 return FixedVectorType::get(Type::getInt32Ty(Context), 2);
445 }
446
447 // Global memory works best with 16-byte accesses.
448 // If the operation has a fixed known length that is large enough, it is
449 // worthwhile to return an even wider type and let legalization lower it into
450 // multiple accesses, effectively unrolling the memcpy loop. Private memory
451 // also hits this, although accesses may be decomposed.
452 //
453 // Don't unroll if Length is not a constant, since unrolling leads to worse
454 // performance for length values that are smaller or slightly larger than the
455 // total size of the type returned here. Mitigating that would require a more
456 // complex lowering for variable-length memcpy and memmove.
457 unsigned I32EltsInVector = 4;
458 if (MemcpyLoopUnroll > 0 && isa<ConstantInt>(Length))
460 MemcpyLoopUnroll * I32EltsInVector);
461
462 return FixedVectorType::get(Type::getInt32Ty(Context), I32EltsInVector);
463}
464
466 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
467 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
468 Align SrcAlign, Align DestAlign,
469 std::optional<uint32_t> AtomicCpySize) const {
470
471 if (AtomicCpySize)
473 OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
474 DestAlign, AtomicCpySize);
475
476 Align MinAlign = std::min(SrcAlign, DestAlign);
477
478 if (MinAlign != Align(2)) {
479 Type *I32x4Ty = FixedVectorType::get(Type::getInt32Ty(Context), 4);
480 while (RemainingBytes >= 16) {
481 OpsOut.push_back(I32x4Ty);
482 RemainingBytes -= 16;
483 }
484
485 Type *I64Ty = Type::getInt64Ty(Context);
486 while (RemainingBytes >= 8) {
487 OpsOut.push_back(I64Ty);
488 RemainingBytes -= 8;
489 }
490
491 Type *I32Ty = Type::getInt32Ty(Context);
492 while (RemainingBytes >= 4) {
493 OpsOut.push_back(I32Ty);
494 RemainingBytes -= 4;
495 }
496 }
497
498 Type *I16Ty = Type::getInt16Ty(Context);
499 while (RemainingBytes >= 2) {
500 OpsOut.push_back(I16Ty);
501 RemainingBytes -= 2;
502 }
503
504 Type *I8Ty = Type::getInt8Ty(Context);
505 while (RemainingBytes) {
506 OpsOut.push_back(I8Ty);
507 --RemainingBytes;
508 }
509}
510
512 // Disable unrolling if the loop is not vectorized.
513 // TODO: Enable this again.
514 if (VF.isScalar())
515 return 1;
516
517 return 8;
518}
519
521 MemIntrinsicInfo &Info) const {
522 switch (Inst->getIntrinsicID()) {
523 case Intrinsic::amdgcn_ds_ordered_add:
524 case Intrinsic::amdgcn_ds_ordered_swap: {
525 auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
526 auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
527 if (!Ordering || !Volatile)
528 return false; // Invalid.
529
530 unsigned OrderingVal = Ordering->getZExtValue();
531 if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
532 return false;
533
534 Info.PtrVal = Inst->getArgOperand(0);
535 Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
536 Info.ReadMem = true;
537 Info.WriteMem = true;
538 Info.IsVolatile = !Volatile->isZero();
539 return true;
540 }
541 default:
542 return false;
543 }
544}
545
547 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
550 const Instruction *CxtI) {
551
552 // Legalize the type.
553 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
554 int ISD = TLI->InstructionOpcodeToISD(Opcode);
555
556 // Because we don't have any legal vector operations, but the legal types, we
557 // need to account for split vectors.
558 unsigned NElts = LT.second.isVector() ?
559 LT.second.getVectorNumElements() : 1;
560
561 MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
562
563 switch (ISD) {
564 case ISD::SHL:
565 case ISD::SRL:
566 case ISD::SRA:
567 if (SLT == MVT::i64)
568 return get64BitInstrCost(CostKind) * LT.first * NElts;
569
570 if (ST->has16BitInsts() && SLT == MVT::i16)
571 NElts = (NElts + 1) / 2;
572
573 // i32
574 return getFullRateInstrCost() * LT.first * NElts;
575 case ISD::ADD:
576 case ISD::SUB:
577 case ISD::AND:
578 case ISD::OR:
579 case ISD::XOR:
580 if (SLT == MVT::i64) {
581 // and, or and xor are typically split into 2 VALU instructions.
582 return 2 * getFullRateInstrCost() * LT.first * NElts;
583 }
584
585 if (ST->has16BitInsts() && SLT == MVT::i16)
586 NElts = (NElts + 1) / 2;
587
588 return LT.first * NElts * getFullRateInstrCost();
589 case ISD::MUL: {
590 const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
591 if (SLT == MVT::i64) {
592 const int FullRateCost = getFullRateInstrCost();
593 return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
594 }
595
596 if (ST->has16BitInsts() && SLT == MVT::i16)
597 NElts = (NElts + 1) / 2;
598
599 // i32
600 return QuarterRateCost * NElts * LT.first;
601 }
602 case ISD::FMUL:
603 // Check possible fuse {fadd|fsub}(a,fmul(b,c)) and return zero cost for
604 // fmul(b,c) supposing the fadd|fsub will get estimated cost for the whole
605 // fused operation.
606 if (CxtI && CxtI->hasOneUse())
607 if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
608 const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
609 if (OPC == ISD::FADD || OPC == ISD::FSUB) {
610 if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
612 if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
614
615 // Estimate all types may be fused with contract/unsafe flags
617 if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
618 Options.UnsafeFPMath ||
619 (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
621 }
622 }
623 [[fallthrough]];
624 case ISD::FADD:
625 case ISD::FSUB:
626 if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
627 NElts = (NElts + 1) / 2;
628 if (SLT == MVT::f64)
629 return LT.first * NElts * get64BitInstrCost(CostKind);
630
631 if (ST->has16BitInsts() && SLT == MVT::f16)
632 NElts = (NElts + 1) / 2;
633
634 if (SLT == MVT::f32 || SLT == MVT::f16)
635 return LT.first * NElts * getFullRateInstrCost();
636 break;
637 case ISD::FDIV:
638 case ISD::FREM:
639 // FIXME: frem should be handled separately. The fdiv in it is most of it,
640 // but the current lowering is also not entirely correct.
641 if (SLT == MVT::f64) {
642 int Cost = 7 * get64BitInstrCost(CostKind) +
643 getQuarterRateInstrCost(CostKind) +
644 3 * getHalfRateInstrCost(CostKind);
645 // Add cost of workaround.
647 Cost += 3 * getFullRateInstrCost();
648
649 return LT.first * Cost * NElts;
650 }
651
652 if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
653 // TODO: This is more complicated, unsafe flags etc.
654 if ((SLT == MVT::f32 && !HasFP32Denormals) ||
655 (SLT == MVT::f16 && ST->has16BitInsts())) {
656 return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
657 }
658 }
659
660 if (SLT == MVT::f16 && ST->has16BitInsts()) {
661 // 2 x v_cvt_f32_f16
662 // f32 rcp
663 // f32 fmul
664 // v_cvt_f16_f32
665 // f16 div_fixup
666 int Cost =
667 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
668 return LT.first * Cost * NElts;
669 }
670
671 if (SLT == MVT::f32 && ((CxtI && CxtI->hasApproxFunc()) ||
673 // Fast unsafe fdiv lowering:
674 // f32 rcp
675 // f32 fmul
676 int Cost = getQuarterRateInstrCost(CostKind) + getFullRateInstrCost();
677 return LT.first * Cost * NElts;
678 }
679
680 if (SLT == MVT::f32 || SLT == MVT::f16) {
681 // 4 more v_cvt_* insts without f16 insts support
682 int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
683 1 * getQuarterRateInstrCost(CostKind);
684
685 if (!HasFP32Denormals) {
686 // FP mode switches.
687 Cost += 2 * getFullRateInstrCost();
688 }
689
690 return LT.first * NElts * Cost;
691 }
692 break;
693 case ISD::FNEG:
694 // Use the backend' estimation. If fneg is not free each element will cost
695 // one additional instruction.
696 return TLI->isFNegFree(SLT) ? 0 : NElts;
697 default:
698 break;
699 }
700
701 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
702 Args, CxtI);
703}
704
705// Return true if there's a potential benefit from using v2f16/v2i16
706// instructions for an intrinsic, even if it requires nontrivial legalization.
708 switch (ID) {
709 case Intrinsic::fma:
710 case Intrinsic::fmuladd:
711 case Intrinsic::copysign:
712 case Intrinsic::canonicalize:
713 // There's a small benefit to using vector ops in the legalized code.
714 case Intrinsic::round:
715 case Intrinsic::uadd_sat:
716 case Intrinsic::usub_sat:
717 case Intrinsic::sadd_sat:
718 case Intrinsic::ssub_sat:
719 case Intrinsic::abs:
720 return true;
721 default:
722 return false;
723 }
724}
725
729 if (ICA.getID() == Intrinsic::fabs)
730 return 0;
731
734
735 Type *RetTy = ICA.getReturnType();
736
737 // Legalize the type.
738 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
739
740 unsigned NElts = LT.second.isVector() ?
741 LT.second.getVectorNumElements() : 1;
742
743 MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
744
745 if (SLT == MVT::f64)
746 return LT.first * NElts * get64BitInstrCost(CostKind);
747
748 if ((ST->has16BitInsts() && (SLT == MVT::f16 || SLT == MVT::i16)) ||
749 (ST->hasPackedFP32Ops() && SLT == MVT::f32))
750 NElts = (NElts + 1) / 2;
751
752 // TODO: Get more refined intrinsic costs?
753 unsigned InstRate = getQuarterRateInstrCost(CostKind);
754
755 switch (ICA.getID()) {
756 case Intrinsic::fma:
757 case Intrinsic::fmuladd:
758 if ((SLT == MVT::f32 && ST->hasFastFMAF32()) || SLT == MVT::f16)
759 InstRate = getFullRateInstrCost();
760 else {
761 InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
762 : getQuarterRateInstrCost(CostKind);
763 }
764 break;
765 case Intrinsic::copysign:
766 return NElts * getFullRateInstrCost();
767 case Intrinsic::canonicalize: {
768 assert(SLT != MVT::f64);
769 InstRate = getFullRateInstrCost();
770 break;
771 }
772 case Intrinsic::uadd_sat:
773 case Intrinsic::usub_sat:
774 case Intrinsic::sadd_sat:
775 case Intrinsic::ssub_sat: {
776 if (SLT == MVT::i16 || SLT == MVT::i32)
777 InstRate = getFullRateInstrCost();
778
779 static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
780 if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
781 NElts = 1;
782 break;
783 }
784 case Intrinsic::abs:
785 // Expansion takes 2 instructions for VALU
786 if (SLT == MVT::i16 || SLT == MVT::i32)
787 InstRate = 2 * getFullRateInstrCost();
788 break;
789 default:
790 break;
791 }
792
793 return LT.first * NElts * InstRate;
794}
795
798 const Instruction *I) {
799 assert((I == nullptr || I->getOpcode() == Opcode) &&
800 "Opcode should reflect passed instruction.");
801 const bool SCost =
803 const int CBrCost = SCost ? 5 : 7;
804 switch (Opcode) {
805 case Instruction::Br: {
806 // Branch instruction takes about 4 slots on gfx900.
807 const auto *BI = dyn_cast_or_null<BranchInst>(I);
808 if (BI && BI->isUnconditional())
809 return SCost ? 1 : 4;
810 // Suppose conditional branch takes additional 3 exec manipulations
811 // instructions in average.
812 return CBrCost;
813 }
814 case Instruction::Switch: {
815 const auto *SI = dyn_cast_or_null<SwitchInst>(I);
816 // Each case (including default) takes 1 cmp + 1 cbr instructions in
817 // average.
818 return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
819 }
820 case Instruction::Ret:
821 return SCost ? 1 : 10;
822 }
823 return BaseT::getCFInstrCost(Opcode, CostKind, I);
824}
825
828 std::optional<FastMathFlags> FMF,
831 return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
832
833 EVT OrigTy = TLI->getValueType(DL, Ty);
834
835 // Computes cost on targets that have packed math instructions(which support
836 // 16-bit types only).
837 if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
838 return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
839
840 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
841 return LT.first * getFullRateInstrCost();
842}
843
846 FastMathFlags FMF,
848 EVT OrigTy = TLI->getValueType(DL, Ty);
849
850 // Computes cost on targets that have packed math instructions(which support
851 // 16-bit types only).
852 if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
853 return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
854
855 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
856 return LT.first * getHalfRateInstrCost(CostKind);
857}
858
861 unsigned Index, Value *Op0,
862 Value *Op1) {
863 switch (Opcode) {
864 case Instruction::ExtractElement:
865 case Instruction::InsertElement: {
866 unsigned EltSize
867 = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
868 if (EltSize < 32) {
869 if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
870 return 0;
871 return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0,
872 Op1);
873 }
874
875 // Extracts are just reads of a subregister, so are free. Inserts are
876 // considered free because we don't want to have any cost for scalarizing
877 // operations, and we don't have to copy into a different register class.
878
879 // Dynamic indexing isn't free and is best avoided.
880 return Index == ~0u ? 2 : 0;
881 }
882 default:
883 return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
884 }
885}
886
887/// Analyze if the results of inline asm are divergent. If \p Indices is empty,
888/// this is analyzing the collective result of all output registers. Otherwise,
889/// this is only querying a specific result index if this returns multiple
890/// registers in a struct.
892 const CallInst *CI, ArrayRef<unsigned> Indices) const {
893 // TODO: Handle complex extract indices
894 if (Indices.size() > 1)
895 return true;
896
897 const DataLayout &DL = CI->getDataLayout();
898 const SIRegisterInfo *TRI = ST->getRegisterInfo();
899 TargetLowering::AsmOperandInfoVector TargetConstraints =
900 TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);
901
902 const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];
903
904 int OutputIdx = 0;
905 for (auto &TC : TargetConstraints) {
906 if (TC.Type != InlineAsm::isOutput)
907 continue;
908
909 // Skip outputs we don't care about.
910 if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
911 continue;
912
914
916 TRI, TC.ConstraintCode, TC.ConstraintVT).second;
917
918 // For AGPR constraints null is returned on subtargets without AGPRs, so
919 // assume divergent for null.
920 if (!RC || !TRI->isSGPRClass(RC))
921 return true;
922 }
923
924 return false;
925}
926
928 const IntrinsicInst *ReadReg) const {
929 Metadata *MD =
930 cast<MetadataAsValue>(ReadReg->getArgOperand(0))->getMetadata();
932 cast<MDString>(cast<MDNode>(MD)->getOperand(0))->getString();
933
934 // Special case registers that look like VCC.
935 MVT VT = MVT::getVT(ReadReg->getType());
936 if (VT == MVT::i1)
937 return true;
938
939 // Special case scalar registers that start with 'v'.
940 if (RegName.starts_with("vcc") || RegName.empty())
941 return false;
942
943 // VGPR or AGPR is divergent. There aren't any specially named vector
944 // registers.
945 return RegName[0] == 'v' || RegName[0] == 'a';
946}
947
948/// \returns true if the result of the value could potentially be
949/// different across workitems in a wavefront.
951 if (const Argument *A = dyn_cast<Argument>(V))
953
954 // Loads from the private and flat address spaces are divergent, because
955 // threads can execute the load instruction with the same inputs and get
956 // different results.
957 //
958 // All other loads are not divergent, because if threads issue loads with the
959 // same arguments, they will always get the same result.
960 if (const LoadInst *Load = dyn_cast<LoadInst>(V))
961 return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
962 Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
963
964 // Atomics are divergent because they are executed sequentially: when an
965 // atomic operation refers to the same address in each thread, then each
966 // thread after the first sees the value written by the previous thread as
967 // original value.
968 if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
969 return true;
970
971 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
972 if (Intrinsic->getIntrinsicID() == Intrinsic::read_register)
973 return isReadRegisterSourceOfDivergence(Intrinsic);
974
975 return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
976 }
977
978 // Assume all function calls are a source of divergence.
979 if (const CallInst *CI = dyn_cast<CallInst>(V)) {
980 if (CI->isInlineAsm())
982 return true;
983 }
984
985 // Assume all function calls are a source of divergence.
986 if (isa<InvokeInst>(V))
987 return true;
988
989 return false;
990}
991
992bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
993 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
994 return AMDGPU::isIntrinsicAlwaysUniform(Intrinsic->getIntrinsicID());
995
996 if (const CallInst *CI = dyn_cast<CallInst>(V)) {
997 if (CI->isInlineAsm())
999 return false;
1000 }
1001
1002 // In most cases TID / wavefrontsize is uniform.
1003 //
1004 // However, if a kernel has uneven dimesions we can have a value of
1005 // workitem-id-x divided by the wavefrontsize non-uniform. For example
1006 // dimensions (65, 2) will have workitems with address (64, 0) and (0, 1)
1007 // packed into a same wave which gives 1 and 0 after the division by 64
1008 // respectively.
1009 //
1010 // FIXME: limit it to 1D kernels only, although that shall be possible
1011 // to perform this optimization is the size of the X dimension is a power
1012 // of 2, we just do not currently have infrastructure to query it.
1013 using namespace llvm::PatternMatch;
1014 uint64_t C;
1015 if (match(V, m_LShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
1016 m_ConstantInt(C))) ||
1017 match(V, m_AShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
1018 m_ConstantInt(C)))) {
1019 const Function *F = cast<Instruction>(V)->getFunction();
1020 return C >= ST->getWavefrontSizeLog2() &&
1021 ST->getMaxWorkitemID(*F, 1) == 0 && ST->getMaxWorkitemID(*F, 2) == 0;
1022 }
1023
1024 Value *Mask;
1025 if (match(V, m_c_And(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
1026 m_Value(Mask)))) {
1027 const Function *F = cast<Instruction>(V)->getFunction();
1028 const DataLayout &DL = F->getDataLayout();
1029 return computeKnownBits(Mask, DL).countMinTrailingZeros() >=
1030 ST->getWavefrontSizeLog2() &&
1031 ST->getMaxWorkitemID(*F, 1) == 0 && ST->getMaxWorkitemID(*F, 2) == 0;
1032 }
1033
1034 const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
1035 if (!ExtValue)
1036 return false;
1037
1038 const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
1039 if (!CI)
1040 return false;
1041
1042 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
1043 switch (Intrinsic->getIntrinsicID()) {
1044 default:
1045 return false;
1046 case Intrinsic::amdgcn_if:
1047 case Intrinsic::amdgcn_else: {
1048 ArrayRef<unsigned> Indices = ExtValue->getIndices();
1049 return Indices.size() == 1 && Indices[0] == 1;
1050 }
1051 }
1052 }
1053
1054 // If we have inline asm returning mixed SGPR and VGPR results, we inferred
1055 // divergent for the overall struct return. We need to override it in the
1056 // case we're extracting an SGPR component here.
1057 if (CI->isInlineAsm())
1058 return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());
1059
1060 return false;
1061}
1062
1064 Intrinsic::ID IID) const {
1065 switch (IID) {
1066 case Intrinsic::amdgcn_is_shared:
1067 case Intrinsic::amdgcn_is_private:
1068 case Intrinsic::amdgcn_flat_atomic_fmax_num:
1069 case Intrinsic::amdgcn_flat_atomic_fmin_num:
1070 OpIndexes.push_back(0);
1071 return true;
1072 default:
1073 return false;
1074 }
1075}
1076
1078 Value *OldV,
1079 Value *NewV) const {
1080 auto IntrID = II->getIntrinsicID();
1081 switch (IntrID) {
1082 case Intrinsic::amdgcn_is_shared:
1083 case Intrinsic::amdgcn_is_private: {
1084 unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
1086 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1087 LLVMContext &Ctx = NewV->getType()->getContext();
1088 ConstantInt *NewVal = (TrueAS == NewAS) ?
1090 return NewVal;
1091 }
1092 case Intrinsic::ptrmask: {
1093 unsigned OldAS = OldV->getType()->getPointerAddressSpace();
1094 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1095 Value *MaskOp = II->getArgOperand(1);
1096 Type *MaskTy = MaskOp->getType();
1097
1098 bool DoTruncate = false;
1099
1100 const GCNTargetMachine &TM =
1101 static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
1102 if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
1103 // All valid 64-bit to 32-bit casts work by chopping off the high
1104 // bits. Any masking only clearing the low bits will also apply in the new
1105 // address space.
1106 if (DL.getPointerSizeInBits(OldAS) != 64 ||
1107 DL.getPointerSizeInBits(NewAS) != 32)
1108 return nullptr;
1109
1110 // TODO: Do we need to thread more context in here?
1111 KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
1112 if (Known.countMinLeadingOnes() < 32)
1113 return nullptr;
1114
1115 DoTruncate = true;
1116 }
1117
1118 IRBuilder<> B(II);
1119 if (DoTruncate) {
1120 MaskTy = B.getInt32Ty();
1121 MaskOp = B.CreateTrunc(MaskOp, MaskTy);
1122 }
1123
1124 return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
1125 {NewV, MaskOp});
1126 }
1127 case Intrinsic::amdgcn_flat_atomic_fmax_num:
1128 case Intrinsic::amdgcn_flat_atomic_fmin_num: {
1129 Type *DestTy = II->getType();
1130 Type *SrcTy = NewV->getType();
1131 unsigned NewAS = SrcTy->getPointerAddressSpace();
1133 return nullptr;
1134 Module *M = II->getModule();
1136 M, II->getIntrinsicID(), {DestTy, SrcTy, DestTy});
1137 II->setArgOperand(0, NewV);
1138 II->setCalledFunction(NewDecl);
1139 return II;
1140 }
1141 default:
1142 return nullptr;
1143 }
1144}
1145
1147 VectorType *VT, ArrayRef<int> Mask,
1149 int Index, VectorType *SubTp,
1151 const Instruction *CxtI) {
1152 if (!isa<FixedVectorType>(VT))
1153 return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
1154
1155 Kind = improveShuffleKindFromMask(Kind, Mask, VT, Index, SubTp);
1156
1157 // Larger vector widths may require additional instructions, but are
1158 // typically cheaper than scalarized versions.
1159 unsigned NumVectorElts = cast<FixedVectorType>(VT)->getNumElements();
1161 DL.getTypeSizeInBits(VT->getElementType()) == 16) {
1162 bool HasVOP3P = ST->hasVOP3PInsts();
1163 unsigned RequestedElts =
1164 count_if(Mask, [](int MaskElt) { return MaskElt != -1; });
1165 if (RequestedElts == 0)
1166 return 0;
1167 switch (Kind) {
1168 case TTI::SK_Broadcast:
1169 case TTI::SK_Reverse:
1171 // With op_sel VOP3P instructions freely can access the low half or high
1172 // half of a register, so any swizzle of two elements is free.
1173 if (HasVOP3P && NumVectorElts == 2)
1174 return 0;
1175 unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
1176 // SK_Broadcast just reuses the same mask
1177 unsigned NumPermMasks = Kind == TTI::SK_Broadcast ? 1 : NumPerms;
1178 return NumPerms + NumPermMasks;
1179 }
1182 // Even aligned accesses are free
1183 if (!(Index % 2))
1184 return 0;
1185 // Insert/extract subvectors only require shifts / extract code to get the
1186 // relevant bits
1187 return alignTo(RequestedElts, 2) / 2;
1188 }
1190 case TTI::SK_Splice:
1191 case TTI::SK_Select: {
1192 unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
1193 // SK_Select just reuses the same mask
1194 unsigned NumPermMasks = Kind == TTI::SK_Select ? 1 : NumPerms;
1195 return NumPerms + NumPermMasks;
1196 }
1197
1198 default:
1199 break;
1200 }
1201 }
1202
1203 return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
1204}
1205
1206/// Whether it is profitable to sink the operands of an
1207/// Instruction I to the basic block of I.
1208/// This helps using several modifiers (like abs and neg) more often.
1210 SmallVectorImpl<Use *> &Ops) const {
1211 using namespace PatternMatch;
1212
1213 for (auto &Op : I->operands()) {
1214 // Ensure we are not already sinking this operand.
1215 if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
1216 continue;
1217
1218 if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
1219 Ops.push_back(&Op);
1220 }
1221
1222 return !Ops.empty();
1223}
1224
1226 const Function *Callee) const {
1227 const TargetMachine &TM = getTLI()->getTargetMachine();
1228 const GCNSubtarget *CallerST
1229 = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
1230 const GCNSubtarget *CalleeST
1231 = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
1232
1233 const FeatureBitset &CallerBits = CallerST->getFeatureBits();
1234 const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
1235
1236 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
1237 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
1238 if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
1239 return false;
1240
1241 // FIXME: dx10_clamp can just take the caller setting, but there seems to be
1242 // no way to support merge for backend defined attributes.
1243 SIModeRegisterDefaults CallerMode(*Caller, *CallerST);
1244 SIModeRegisterDefaults CalleeMode(*Callee, *CalleeST);
1245 if (!CallerMode.isInlineCompatible(CalleeMode))
1246 return false;
1247
1248 if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
1249 Callee->hasFnAttribute(Attribute::InlineHint))
1250 return true;
1251
1252 // Hack to make compile times reasonable.
1253 if (InlineMaxBB) {
1254 // Single BB does not increase total BB amount.
1255 if (Callee->size() == 1)
1256 return true;
1257 size_t BBSize = Caller->size() + Callee->size() - 1;
1258 return BBSize <= InlineMaxBB;
1259 }
1260
1261 return true;
1262}
1263
1265 const SITargetLowering *TLI,
1266 const GCNTTIImpl *TTIImpl) {
1267 const int NrOfSGPRUntilSpill = 26;
1268 const int NrOfVGPRUntilSpill = 32;
1269
1270 const DataLayout &DL = TTIImpl->getDataLayout();
1271
1272 unsigned adjustThreshold = 0;
1273 int SGPRsInUse = 0;
1274 int VGPRsInUse = 0;
1275 for (const Use &A : CB->args()) {
1276 SmallVector<EVT, 4> ValueVTs;
1277 ComputeValueVTs(*TLI, DL, A.get()->getType(), ValueVTs);
1278 for (auto ArgVT : ValueVTs) {
1279 unsigned CCRegNum = TLI->getNumRegistersForCallingConv(
1280 CB->getContext(), CB->getCallingConv(), ArgVT);
1282 SGPRsInUse += CCRegNum;
1283 else
1284 VGPRsInUse += CCRegNum;
1285 }
1286 }
1287
1288 // The cost of passing function arguments through the stack:
1289 // 1 instruction to put a function argument on the stack in the caller.
1290 // 1 instruction to take a function argument from the stack in callee.
1291 // 1 instruction is explicitly take care of data dependencies in callee
1292 // function.
1293 InstructionCost ArgStackCost(1);
1294 ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
1295 Instruction::Store, Type::getInt32Ty(CB->getContext()), Align(4),
1297 ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
1298 Instruction::Load, Type::getInt32Ty(CB->getContext()), Align(4),
1300
1301 // The penalty cost is computed relative to the cost of instructions and does
1302 // not model any storage costs.
1303 adjustThreshold += std::max(0, SGPRsInUse - NrOfSGPRUntilSpill) *
1304 *ArgStackCost.getValue() * InlineConstants::getInstrCost();
1305 adjustThreshold += std::max(0, VGPRsInUse - NrOfVGPRUntilSpill) *
1306 *ArgStackCost.getValue() * InlineConstants::getInstrCost();
1307 return adjustThreshold;
1308}
1309
1310static unsigned getCallArgsTotalAllocaSize(const CallBase *CB,
1311 const DataLayout &DL) {
1312 // If we have a pointer to a private array passed into a function
1313 // it will not be optimized out, leaving scratch usage.
1314 // This function calculates the total size in bytes of the memory that would
1315 // end in scratch if the call was not inlined.
1316 unsigned AllocaSize = 0;
1318 for (Value *PtrArg : CB->args()) {
1319 PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
1320 if (!Ty)
1321 continue;
1322
1323 unsigned AddrSpace = Ty->getAddressSpace();
1324 if (AddrSpace != AMDGPUAS::FLAT_ADDRESS &&
1325 AddrSpace != AMDGPUAS::PRIVATE_ADDRESS)
1326 continue;
1327
1328 const AllocaInst *AI = dyn_cast<AllocaInst>(getUnderlyingObject(PtrArg));
1329 if (!AI || !AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1330 continue;
1331
1332 AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1333 }
1334 return AllocaSize;
1335}
1336
1340}
1341
1343 unsigned Threshold = adjustInliningThresholdUsingCallee(CB, TLI, this);
1344
1345 // Private object passed as arguments may end up in scratch usage if the call
1346 // is not inlined. Increase the inline threshold to promote inlining.
1347 unsigned AllocaSize = getCallArgsTotalAllocaSize(CB, DL);
1348 if (AllocaSize > 0)
1349 Threshold += ArgAllocaCost;
1350 return Threshold;
1351}
1352
1354 const AllocaInst *AI) const {
1355
1356 // Below the cutoff, assume that the private memory objects would be
1357 // optimized
1358 auto AllocaSize = getCallArgsTotalAllocaSize(CB, DL);
1359 if (AllocaSize <= ArgAllocaCutoff)
1360 return 0;
1361
1362 // Above the cutoff, we give a cost to each private memory object
1363 // depending its size. If the array can be optimized by SROA this cost is not
1364 // added to the total-cost in the inliner cost analysis.
1365 //
1366 // We choose the total cost of the alloca such that their sum cancels the
1367 // bonus given in the threshold (ArgAllocaCost).
1368 //
1369 // Cost_Alloca_0 + ... + Cost_Alloca_N == ArgAllocaCost
1370 //
1371 // Awkwardly, the ArgAllocaCost bonus is multiplied by threshold-multiplier,
1372 // the single-bb bonus and the vector-bonus.
1373 //
1374 // We compensate the first two multipliers, by repeating logic from the
1375 // inliner-cost in here. The vector-bonus is 0 on AMDGPU.
1376 static_assert(InlinerVectorBonusPercent == 0, "vector bonus assumed to be 0");
1377 unsigned Threshold = ArgAllocaCost * getInliningThresholdMultiplier();
1378
1379 bool SingleBB = none_of(*CB->getCalledFunction(), [](const BasicBlock &BB) {
1380 return BB.getTerminator()->getNumSuccessors() > 1;
1381 });
1382 if (SingleBB) {
1383 Threshold += Threshold / 2;
1384 }
1385
1386 auto ArgAllocaSize = DL.getTypeAllocSize(AI->getAllocatedType());
1387
1388 // Attribute the bonus proportionally to the alloca size
1389 unsigned AllocaThresholdBonus = (Threshold * ArgAllocaSize) / AllocaSize;
1390
1391 return AllocaThresholdBonus;
1392}
1393
1397 CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
1398}
1399
1402 CommonTTI.getPeelingPreferences(L, SE, PP);
1403}
1404
1405int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
1406 return ST->hasFullRate64Ops()
1407 ? getFullRateInstrCost()
1408 : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
1409 : getQuarterRateInstrCost(CostKind);
1410}
1411
1412std::pair<InstructionCost, MVT>
1413GCNTTIImpl::getTypeLegalizationCost(Type *Ty) const {
1414 std::pair<InstructionCost, MVT> Cost = BaseT::getTypeLegalizationCost(Ty);
1415 auto Size = DL.getTypeSizeInBits(Ty);
1416 // Maximum load or store can handle 8 dwords for scalar and 4 for
1417 // vector ALU. Let's assume anything above 8 dwords is expensive
1418 // even if legal.
1419 if (Size <= 256)
1420 return Cost;
1421
1422 Cost.first += (Size + 255) / 256;
1423 return Cost;
1424}
1425
1427 return ST->hasPrefetch() ? 128 : 0;
1428}
1429
1432}
aarch64 promote const
Provides AMDGPU specific target descriptions.
Rewrite undef for PHI
The AMDGPU TargetMachine interface definition for hw codegen targets.
static cl::opt< unsigned > UnrollThresholdIf("amdgpu-unroll-threshold-if", cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"), cl::init(200), cl::Hidden)
static cl::opt< unsigned > ArgAllocaCost("amdgpu-inline-arg-alloca-cost", cl::Hidden, cl::init(4000), cl::desc("Cost of alloca argument"))
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond, unsigned Depth=0)
static cl::opt< bool > UnrollRuntimeLocal("amdgpu-unroll-runtime-local", cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"), cl::init(true), cl::Hidden)
static unsigned adjustInliningThresholdUsingCallee(const CallBase *CB, const SITargetLowering *TLI, const GCNTTIImpl *TTIImpl)
static cl::opt< unsigned > ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden, cl::init(256), cl::desc("Maximum alloca size to use for inline cost"))
static cl::opt< size_t > InlineMaxBB("amdgpu-inline-max-bb", cl::Hidden, cl::init(1100), cl::desc("Maximum number of BBs allowed in a function after inlining" " (compile time constraint)"))
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID)
static cl::opt< unsigned > UnrollMaxBlockToAnalyze("amdgpu-unroll-max-block-to-analyze", cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"), cl::init(32), cl::Hidden)
static unsigned getCallArgsTotalAllocaSize(const CallBase *CB, const DataLayout &DL)
static cl::opt< unsigned > UnrollThresholdPrivate("amdgpu-unroll-threshold-private", cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"), cl::init(2700), cl::Hidden)
static cl::opt< unsigned > MemcpyLoopUnroll("amdgpu-memcpy-loop-unroll", cl::desc("Unroll factor (affecting 4x32-bit operations) to use for memory " "operations when lowering memcpy as a loop"), cl::init(16), cl::Hidden)
static cl::opt< unsigned > UnrollThresholdLocal("amdgpu-unroll-threshold-local", cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"), cl::init(1000), cl::Hidden)
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
return RetTy
#define LLVM_DEBUG(...)
Definition: Debug.h:106
uint64_t Size
Hexagon Common GEP
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool hasMadMacF32Insts() const
unsigned getMaxWorkitemID(const Function &Kernel, unsigned Dimension) const
Return the maximum workitem ID value in the function, for the given (0, 1, 2) dimension.
unsigned getWavefrontSizeLog2() const
bool has16BitInsts() const
bool hasFastFMAF32() const
bool isSingleLaneExecution(const Function &Kernel) const
Return true if only a single workitem can be active in a wave.
bool hasVOP3PInsts() const
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
int64_t getMaxMemIntrinsicInlineSizeThreshold() const
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
an instruction to allocate memory on the stack
Definition: Instructions.h:63
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr)
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
Definition: BasicTTIImpl.h:668
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
Definition: BasicTTIImpl.h:896
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr)
Definition: BasicTTIImpl.h:932
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1416
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1349
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1407
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1294
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1285
unsigned getArgOperandNo(const Use *U) const
Given a use for a arg operand, get the arg operand number that corresponds to it.
Definition: InstrTypes.h:1325
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:873
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:163
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:364
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:617
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:322
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
Container class for subtarget features.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:791
bool hasPrefetch() const
Definition: GCNSubtarget.h:962
bool hasUsableDivScaleConditionOutput() const
Condition output from div_scale is usable.
Definition: GCNSubtarget.h:487
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:291
bool hasPackedFP32Ops() const
bool hasFullRate64Ops() const
Definition: GCNSubtarget.h:387
bool hasUnalignedScratchAccessEnabled() const
Definition: GCNSubtarget.h:603
unsigned getMaxPrivateElementSize(bool ForBufferRSrc=false) const
Definition: GCNSubtarget.h:354
Generation getGeneration() const
Definition: GCNSubtarget.h:327
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
InstructionCost getVectorInstrCost(unsigned Opcode, Type *ValTy, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
bool isAlwaysUniform(const Value *V) const
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
int64_t getMaxMemIntrinsicInlineSizeThreshold() const
bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
bool isInlineAsmSourceOfDivergence(const CallInst *CI, ArrayRef< unsigned > Indices={}) const
Analyze if the results of inline asm are divergent.
bool isReadRegisterSourceOfDivergence(const IntrinsicInst *ReadReg) const
int getInliningLastCallToStaticBonus() const
unsigned getNumberOfRegisters(unsigned RCID) const
bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
bool shouldPrefetchAddressSpace(unsigned AS) const override
unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
unsigned getMaxInterleaveFactor(ElementCount VF)
unsigned getInliningThresholdMultiplier() const
bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
Whether it is profitable to sink the operands of an Instruction I to the basic block of I.
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
unsigned getMinVectorRegisterBitWidth() const
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
unsigned getPrefetchDistance() const override
How much before a load we should place the prefetch instruction.
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
unsigned adjustInliningThreshold(const CallBase *CB) const
bool areInlineCompatible(const Function *Caller, const Function *Callee) const
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize) const
void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
bool isSourceOfDivergence(const Value *V) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr)
bool hasBranchDivergence(const Function *F=nullptr) const
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr)
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
bool hasApproxFunc() const LLVM_READONLY
Determine whether the approximate-math-functions flag is set.
bool hasAllowContract() const LLVM_READONLY
Determine whether the allow-contract flag is set.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:176
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
Metadata node.
Definition: Metadata.h:1069
Machine Value Type.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:237
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
The optimization diagnostic interface.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
The main scalar evolution driver.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
bool empty() const
Definition: SmallVector.h:81
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const TargetMachine & getTargetMachine() const
std::vector< AsmOperandInfo > AsmOperandInfoVector
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetOptions Options
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
const DataLayout & getDataLayout() const
void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const
TargetCostKind
The kind of cost model.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
@ TCC_Free
Expected to fold away in lowering.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
Definition: TypeSize.h:348
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
static IntegerType * getInt16Ty(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:228
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
user_iterator user_begin()
Definition: Value.h:397
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
Type * getElementType() const
Definition: DerivedTypes.h:460
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ BUFFER_STRIDED_POINTER
Address space for 192-bit fat buffer pointers with an additional index.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ BUFFER_FAT_POINTER
Address space for 160-bit buffer fat pointers.
@ PRIVATE_ADDRESS
Address space for private memory.
@ BUFFER_RESOURCE
Address space for 128-bit buffer resources.
bool isFlatGlobalAddrSpace(unsigned AS)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
bool isExtendedGlobalAddrSpace(unsigned AS)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:981
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:731
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
Definition: PatternMatch.h:931
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Length
Definition: DWP.cpp:480
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
MDNode * findOptionMDForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for a loop.
Definition: LoopInfo.cpp:1055
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
Definition: MathExtras.h:366
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ FAdd
Sum of floats.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:79
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1945
InstructionCost Cost
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
static constexpr DenormalMode getPreserveSign()
Extended Value Type.
Definition: ValueTypes.h:35
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:380
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:243
Information about a load/store intrinsic defined by the target.
bool isInlineCompatible(SIModeRegisterDefaults CalleeMode) const
Parameters that control the generic loop unrolling transformation.
unsigned Threshold
The cost threshold for the unrolled loop.
bool UnrollVectorizedLoop
Don't disable runtime unroll for the loops which were vectorized.
unsigned MaxIterationsCountToAnalyze
Don't allow loop unrolling to simulate more than this number of iterations when checking full unroll ...
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...