#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "AMDGPUtti"
36 "amdgpu-unroll-threshold-private",
37 cl::desc(
"Unroll threshold for AMDGPU if private memory used in a loop"),
41 "amdgpu-unroll-threshold-local",
42 cl::desc(
"Unroll threshold for AMDGPU if local memory used in a loop"),
46 "amdgpu-unroll-threshold-if",
47 cl::desc(
"Unroll threshold increment for AMDGPU for each if statement inside loop"),
51 "amdgpu-unroll-runtime-local",
52 cl::desc(
"Allow runtime unroll for AMDGPU if local memory used in a loop"),
56 "amdgpu-unroll-max-block-to-analyze",
57 cl::desc(
"Inner loop block size threshold to analyze in unroll for AMDGPU"),
62 cl::desc(
"Cost of alloca argument"));
70 cl::desc(
"Maximum alloca size to use for inline cost"));
75 cl::desc(
"Maximum number of BBs allowed in a function after inlining"
76 " (compile time constraint)"));
  for (const Value *V : I->operand_values()) {
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
                  return SubLoop->contains(PHI); }))
    : BaseT(TM, F.getDataLayout()),
      TargetTriple(TM->getTargetTriple()),
      TLI(ST->getTargetLowering()) {}

  const Function &F = *L->getHeader()->getParent();
      F.getFnAttributeAsParsedInteger("amdgpu-unroll-threshold", 300);
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  const unsigned MaxAlloca = (256 - 16) * 4;
  if (MDNode *LoopUnrollThreshold =
          findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
    if (LoopUnrollThreshold->getNumOperands() == 2) {
      ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
          LoopUnrollThreshold->getOperand(1));
      if (MetaThresholdValue) {
        ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
        ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
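// Illustrative IR for the metadata path above (the option name is the one
// assumed in the findOptionMDForLoop call): a two-operand node whose second
// operand is the ConstantInt that overrides the unroll threshold for this loop.
//   br i1 %cond, label %body, label %exit, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"amdgpu.loop.unroll.threshold", i32 100}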
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  unsigned LocalGEPsSeen = 0;
        return SubLoop->contains(BB); }))
    if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
      if (UP.Threshold < MaxBoost && Br->isConditional()) {
        if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
            (L->contains(Succ1) && L->isLoopExiting(Succ1)))
                          << *L << " due to " << *Br << '\n');
      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
        Threshold = ThresholdPrivate;
        Threshold = ThresholdLocal;
        if (AllocaSize > MaxAlloca)
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
                          << *L << " due to LDS use.\n");
      bool HasLoopDef = false;
        if (!Inst || L->isLoopInvariant(Op))
              return SubLoop->contains(Inst); }))
                        << *L << " due to " << *GEP << '\n');
    AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
    AMDGPU::FeatureUnalignedAccessMode,
    AMDGPU::FeatureAutoWaitcntBeforeBarrier,
    AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,
    AMDGPU::FeatureSRAMECC,
    AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};
    : BaseT(TM, F.getDataLayout()),
      TLI(ST->getTargetLowering()), CommonTTI(TM, F),
      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
  HasFP64FP16Denormals =
  if (Opcode == Instruction::Load || Opcode == Instruction::Store)
    return 32 * 4 / ElemWidth;
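// Worked example for the formula above: the cap works out to 128 / ElemWidth,
// so 8-bit elements give a maximum VF of 16 and 32-bit elements give 4.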
                                         unsigned ChainSizeInBytes,
  unsigned VecRegBitWidth = VF * LoadSize;
    return 128 / LoadSize;

                                          unsigned ChainSizeInBytes,
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;
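// Sketch of the clamp both vector-factor helpers above perform (assumed shape;
// the upstream functions take additional parameters that are elided here):
// keep the widest factor whose total width still fits in a 128-bit register.
static unsigned clampToVecRegWidth(unsigned VF, unsigned ElemSizeInBits) {
  unsigned VecRegBitWidth = VF * ElemSizeInBits;
  if (VecRegBitWidth > 128)
    return 128 / ElemSizeInBits; // widest factor that still fits
  return VF;                     // requested factor already fits
}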
                                            unsigned AddrSpace) const {
                                             unsigned AddrSpace) const {
                                              unsigned AddrSpace) const {
    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  if (AtomicElementSize)
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    std::optional<uint32_t> AtomicCpySize) const {
  assert(RemainingBytes < 16);
        OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
        DestAlign, AtomicCpySize);
  while (RemainingBytes >= 8) {
  while (RemainingBytes >= 4) {
  while (RemainingBytes >= 2) {
  while (RemainingBytes) {
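// Sketch of what the four loops above build (assuming no AtomicCpySize and the
// surrounding file's includes): the residual tail of a memcpy is covered
// greedily with i64, i32, i16 and finally i8 pieces.
static void buildResidualOps(LLVMContext &Ctx, unsigned RemainingBytes,
                             SmallVectorImpl<Type *> &OpsOut) {
  while (RemainingBytes >= 8) { OpsOut.push_back(Type::getInt64Ty(Ctx)); RemainingBytes -= 8; }
  while (RemainingBytes >= 4) { OpsOut.push_back(Type::getInt32Ty(Ctx)); RemainingBytes -= 4; }
  while (RemainingBytes >= 2) { OpsOut.push_back(Type::getInt16Ty(Ctx)); RemainingBytes -= 2; }
  while (RemainingBytes)      { OpsOut.push_back(Type::getInt8Ty(Ctx));  RemainingBytes -= 1; }
}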
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
    unsigned OrderingVal = Ordering->getZExtValue();
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isZero();
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;
      return get64BitInstrCost(CostKind) * LT.first * NElts;
      NElts = (NElts + 1) / 2;
    return getFullRateInstrCost() * LT.first * NElts;
    if (SLT == MVT::i64) {
      return 2 * getFullRateInstrCost() * LT.first * NElts;
      NElts = (NElts + 1) / 2;
    return LT.first * NElts * getFullRateInstrCost();
    const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
      NElts = (NElts + 1) / 2;
    return QuarterRateCost * NElts * LT.first;
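// Worked example for the i64 multiply case above: with LT.first == 1 and
// NElts == 1, a single legal i64 multiply is priced at 4 quarter-rate plus
// (2 * 2) == 4 full-rate instructions.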
    if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
      if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
      NElts = (NElts + 1) / 2;
      return LT.first * NElts * get64BitInstrCost(CostKind);
      NElts = (NElts + 1) / 2;
    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    if (SLT == MVT::f64) {
      Cost += 3 * getFullRateInstrCost();
      return LT.first * Cost * NElts;
    if ((SLT == MVT::f32 && !HasFP32Denormals) ||
      return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
          4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
      return LT.first * Cost * NElts;
      int Cost = getQuarterRateInstrCost(CostKind) + getFullRateInstrCost();
      return LT.first * Cost * NElts;
    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
                 1 * getQuarterRateInstrCost(CostKind);
      if (!HasFP32Denormals) {
        Cost += 2 * getFullRateInstrCost();
      return LT.first * NElts * Cost;
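// Worked example for the FDIV case above: an f32 divide is modelled as 10
// full-rate plus 1 quarter-rate instruction (14 full-rate instead of 10 for
// f16), and two more full-rate instructions are added when HasFP32Denormals
// is false.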
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::canonicalize:
  case Intrinsic::round:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  if (ICA.getID() == Intrinsic::fabs)
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;
    return LT.first * NElts * get64BitInstrCost(CostKind);
  if ((ST->has16BitInsts() && (SLT == MVT::f16 || SLT == MVT::i16)) ||
    NElts = (NElts + 1) / 2;
  unsigned InstRate = getQuarterRateInstrCost(CostKind);
  switch (ICA.getID()) {
  case Intrinsic::fmuladd:
    if ((SLT == MVT::f32 && ST->hasFastFMAF32()) || SLT == MVT::f16)
      InstRate = getFullRateInstrCost();
                     : getQuarterRateInstrCost(CostKind);
  case Intrinsic::copysign:
    return NElts * getFullRateInstrCost();
  case Intrinsic::canonicalize: {
        SLT == MVT::f64 ? get64BitInstrCost(CostKind) : getFullRateInstrCost();
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat: {
    if (SLT == MVT::i16 || SLT == MVT::i32)
      InstRate = getFullRateInstrCost();
    static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
    if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
    if (SLT == MVT::i16 || SLT == MVT::i32)
      InstRate = 2 * getFullRateInstrCost();
  return LT.first * NElts * InstRate;
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  const int CBrCost = SCost ? 5 : 7;
  case Instruction::Br: {
    auto BI = dyn_cast_or_null<BranchInst>(I);
    if (BI && BI->isUnconditional())
      return SCost ? 1 : 4;
  case Instruction::Switch: {
    auto SI = dyn_cast_or_null<SwitchInst>(I);
    return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
  case Instruction::Ret:
    return SCost ? 1 : 10;
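// Worked example for the Switch case above: with the throughput branch cost
// (CBrCost == 7), a switch with 3 cases is priced at (3 + 1) * (7 + 1) == 32;
// without a concrete instruction it falls back to 4 * (CBrCost + 1).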
                                           std::optional<FastMathFlags> FMF,
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  return LT.first * getFullRateInstrCost();

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  return LT.first * getHalfRateInstrCost(CostKind);
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    return Index == ~0u ? 2 : 0;
  if (Indices.size() > 1)
  const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];
  for (auto &TC : TargetConstraints) {
    if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
        TRI, TC.ConstraintCode, TC.ConstraintVT).second;
    if (!RC || !TRI->isSGPRClass(RC))
      cast<MetadataAsValue>(ReadReg->getArgOperand(0))->getMetadata();
      cast<MDString>(cast<MDNode>(MD)->getOperand(0))->getString();
  if (const Argument *A = dyn_cast<Argument>(V))
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    if (Intrinsic->getIntrinsicID() == Intrinsic::read_register)
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
  if (isa<InvokeInst>(V))
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm())
  if (match(V, m_LShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
      match(V, m_AShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
    const Function *F = cast<Instruction>(V)->getFunction();
  if (match(V, m_c_And(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
    const Function *F = cast<Instruction>(V)->getFunction();
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
    switch (Intrinsic->getIntrinsicID()) {
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else: {
      return Indices.size() == 1 && Indices[0] == 1;
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
  case Intrinsic::amdgcn_flat_atomic_fadd:
  case Intrinsic::amdgcn_flat_atomic_fmax:
  case Intrinsic::amdgcn_flat_atomic_fmin:
  case Intrinsic::amdgcn_flat_atomic_fmax_num:
  case Intrinsic::amdgcn_flat_atomic_fmin_num:
                                                    Value *NewV) const {
  auto IntrID = II->getIntrinsicID();
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
  case Intrinsic::ptrmask: {
    Value *MaskOp = II->getArgOperand(1);
    bool DoTruncate = false;
    if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
      MaskTy = B.getInt32Ty();
      MaskOp = B.CreateTrunc(MaskOp, MaskTy);
    return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
  case Intrinsic::amdgcn_flat_atomic_fadd:
  case Intrinsic::amdgcn_flat_atomic_fmax:
  case Intrinsic::amdgcn_flat_atomic_fmin:
  case Intrinsic::amdgcn_flat_atomic_fmax_num:
  case Intrinsic::amdgcn_flat_atomic_fmin_num: {
    Type *DestTy = II->getType();
                                          {DestTy, SrcTy, DestTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
  if (!isa<FixedVectorType>(VT))
  unsigned NumVectorElts = cast<FixedVectorType>(VT)->getNumElements();
  unsigned RequestedElts =
      count_if(Mask, [](int MaskElt) { return MaskElt != -1; });
  if (RequestedElts == 0)
  if (HasVOP3P && NumVectorElts == 2)
    unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
    return NumPerms + NumPermMasks;
    return alignTo(RequestedElts, 2) / 2;
    unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
    return NumPerms + NumPermMasks;
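// Worked example for the 16-bit shuffle costing above: a mask that reads 3 of
// the source elements needs alignTo(3, 2) / 2 == 2 packed permutes, plus the
// NumPermMasks mask constants in the branches that materialize them.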
      = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
      = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
  const FeatureBitset &CallerBits = CallerST->getFeatureBits();
  const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
  if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
      Callee->hasFnAttribute(Attribute::InlineHint))
  if (Callee->size() == 1)
  size_t BBSize = Caller->size() + Callee->size() - 1;
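// The feature test above, spelled out (illustrative helper, not upstream code):
// inlining is refused when the callee requires any non-ignored feature bit
// that the caller does not also have.
static bool calleeFeaturesSubsetOfCaller(const FeatureBitset &CallerBits,
                                         const FeatureBitset &CalleeBits,
                                         const FeatureBitset &Ignored) {
  FeatureBitset RealCaller = CallerBits & ~Ignored;
  FeatureBitset RealCallee = CalleeBits & ~Ignored;
  return (RealCaller & RealCallee) == RealCallee; // callee subset of caller
}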
  const int NrOfSGPRUntilSpill = 26;
  const int NrOfVGPRUntilSpill = 32;
  unsigned adjustThreshold = 0;
  for (auto ArgVT : ValueVTs) {
      SGPRsInUse += CCRegNum;
      VGPRsInUse += CCRegNum;
      ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
      ArgStackCost += const_cast<GCNTTIImpl *>(TTIImpl)->getMemoryOpCost(
  adjustThreshold += std::max(0, SGPRsInUse - NrOfSGPRUntilSpill) *
  adjustThreshold += std::max(0, VGPRsInUse - NrOfVGPRUntilSpill) *
  return adjustThreshold;
  unsigned AllocaSize = 0;
    PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
    unsigned AddrSpace = Ty->getAddressSpace();
  static_assert(InlinerVectorBonusPercent == 0, "vector bonus assumed to be 0");
    return BB.getTerminator()->getNumSuccessors() > 1;
    Threshold += Threshold / 2;
  unsigned AllocaThresholdBonus = (Threshold * ArgAllocaSize) / AllocaSize;
  return AllocaThresholdBonus;
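// Worked example for the bonus above (numbers are illustrative only): with a
// threshold of 4096, ArgAllocaSize of 256 and a total AllocaSize of 1024, the
// caller receives an extra (4096 * 256) / 1024 == 1024 of inlining budget.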
             ? getFullRateInstrCost()
             : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
                                      : getQuarterRateInstrCost(CostKind);

std::pair<InstructionCost, MVT>
GCNTTIImpl::getTypeLegalizationCost(Type *Ty) const {