#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
  /// Helper function to access this as a T.
  T *thisT() { return static_cast<T *>(this); }
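  // Illustrative sketch of the CRTP dispatch used throughout this class (all
  // names below are hypothetical, not LLVM API): the base class calls through
  // thisT() so a derived target class can shadow any cost hook without
  // virtual dispatch.
  // \code
  //   template <typename T> struct CostBase {
  //     unsigned getOpCost() { return static_cast<T *>(this)->getOpCostImpl(); }
  //     unsigned getOpCostImpl() { return 1; } // generic fallback
  //   };
  //   struct MyTargetTTI : CostBase<MyTargetTTI> {
  //     unsigned getOpCostImpl() { return 2; } // target override wins
  //   };
  // \endcode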
    // getBroadcastShuffleOverhead: one extract of lane 0 plus one insert per
    // result lane.
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, 0, nullptr, nullptr);
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                        CostKind, i, nullptr, nullptr);

    // getPermuteShuffleOverhead: a generic permute is modeled as one insert
    // and one extract per result lane.
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                        CostKind, i, nullptr, nullptr);
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, i, nullptr, nullptr);
133 "Can only extract subvectors from vectors");
135 assert((!isa<FixedVectorType>(VTy) ||
136 (
Index + NumSubElts) <=
137 (
int)cast<FixedVectorType>(VTy)->getNumElements()) &&
138 "SK_ExtractSubvector index out of range");
144 for (
int i = 0; i != NumSubElts; ++i) {
146 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
148 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
161 "Can only insert subvectors into vectors");
163 assert((!isa<FixedVectorType>(VTy) ||
164 (
Index + NumSubElts) <=
165 (
int)cast<FixedVectorType>(VTy)->getNumElements()) &&
166 "SK_InsertSubvector index out of range");
172 for (
int i = 0; i != NumSubElts; ++i) {
173 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
176 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
CostKind,
177 i +
Index,
nullptr,
nullptr);
  /// Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }
  InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                              Align Alignment,
                                              bool VariableMask,
                                              bool IsGatherScatter,
                                              TTI::TargetCostKind CostKind) {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(DataTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(DataTy);
    // Assume the target has no native masked support and scalarize. For
    // gathers/scatters each lane also extracts its address from a vector of
    // pointers; otherwise that term is zero.
    InstructionCost AddrExtractCost =
        IsGatherScatter ? getVectorInstrCost(
                              Instruction::ExtractElement,
                              FixedVectorType::get(
                                  PointerType::get(VT->getElementType(), 0),
                                  VT->getNumElements()),
                              CostKind, -1, nullptr, nullptr)
                        : 0;

    // One scalar memory operation per lane.
    InstructionCost LoadCost =
        VT->getNumElements() *
        (AddrExtractCost +
         getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));

    // Next, the cost of packing/unpacking the value vector.
    InstructionCost PackingCost =
        getScalarizationOverhead(VT, Opcode != Instruction::Store,
                                 Opcode == Instruction::Store, CostKind);

    // A variable mask costs an i1 extract, a branch and a PHI per lane.
    InstructionCost ConditionalCost = 0;
    if (VariableMask)
      ConditionalCost =
          VT->getNumElements() *
          (getVectorInstrCost(Instruction::ExtractElement,
                              FixedVectorType::get(
                                  Type::getInt1Ty(DataTy->getContext()),
                                  VT->getNumElements()),
                              CostKind, -1, nullptr, nullptr) +
           getCFInstrCost(Instruction::Br, CostKind) +
           getCFInstrCost(Instruction::PHI, CostKind));

    return LoadCost + PackingCost + ConditionalCost;
  }
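  // Worked estimate (illustrative unit costs, not target data): a masked load
  // of <4 x i32> with a variable mask, assuming every scalar load, vector
  // insert/extract, branch and PHI costs 1:
  //   LoadCost        = 4 loads                      = 4
  //   PackingCost     = 4 inserts                    = 4
  //   ConditionalCost = 4 * (i1 extract + br + phi)  = 12
  //   Total                                          = 20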
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(
        E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
  }
  // ...
  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const {
    return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I = nullptr,
                             int64_t ScalableOffset = 0) {
    // Packs an AddrMode and delegates to TLI->isLegalAddressingMode.
    // ...
  }
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
      auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->isOperationCustom(ISD::STORE, VT))
        return true;
      // ... otherwise, check whether the equivalent truncating store is legal.
    };
    // Halve VF until the target can store a vector of VF/2 elements.
    while (VF > 2 && IsSupportedByTarget(VF))
      VF /= 2;
    return VF;
  }
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) {
    // A scale is free when the resulting addressing mode is legal.
    // ...
  }
445 unsigned &JumpTableSize,
455 unsigned N = SI.getNumCases();
460 bool IsJTAllowed = TLI->
areJTsAllowed(SI.getParent()->getParent());
466 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
467 APInt MinCaseVal = MaxCaseVal;
468 for (
auto CI : SI.cases()) {
469 const APInt &CaseVal = CI.getCaseValue()->getValue();
470 if (CaseVal.
sgt(MaxCaseVal))
471 MaxCaseVal = CaseVal;
472 if (CaseVal.
slt(MinCaseVal))
473 MinCaseVal = CaseVal;
479 for (
auto I : SI.cases())
480 Dests.
insert(
I.getCaseSuccessor());
489 if (
N < 2 || N < TLI->getMinimumJumpTableEntries())
492 (MaxCaseVal - MinCaseVal)
493 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
496 JumpTableSize = Range;
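  // Worked example (illustrative): a switch over cases {2, 3, 9} has
  // MinCaseVal = 2 and MaxCaseVal = 9, so Range = (9 - 2) + 1 = 8; the switch
  // counts as a single cluster only if the target considers 3 cases spread
  // over 8 jump-table entries dense enough.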
  bool shouldBuildRelLookupTables() const {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    // If non-PIC mode, do not generate a relative lookup table.
    if (!TM.isPositionIndependent())
      return false;
    // Relative lookup table entries are 32-bit offsets, so they also require
    // a 64-bit architecture.
    Triple TargetTriple = TM.getTargetTriple();
    if (!TargetTriple.isArch64Bit())
      return false;
    // ...
    return true;
  }
    // preferToKeepConstantsAttached: divisions by a constant are only worth
    // keeping attached to their user if integer division is expensive here.
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem: {
      if (!isa<ConstantInt>(Inst.getOperand(1)))
        return false;
      EVT VT = getTLI()->getValueType(DL, Inst.getType());
      return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
    }
    // getUnrollingPreferences: size the partial-unroll budget from the
    // target's scheduling model when no explicit threshold is set.
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    // ...
    // Don't unroll loops containing real calls; emit a remark saying why.
    if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      // ...
      ORE->emit([&]() {
        return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
                                  L->getHeader())
               << "advising against unrolling the loop because it "
                  "contains a "
               << ore::NV("Call", &I);
      });
      return;
    }
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) {
    return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
  }
  // simplifyDemandedVectorEltsIntrinsic forwards to the base likewise:
  //   return BaseT::simplifyDemandedVectorEltsIntrinsic(
  //       IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
  //       SimplifyAndSetOp);
  virtual std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    return std::optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  virtual std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));
    // Fall back to the base implementation if the subtarget has no answer.
    // ...
  }
  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }
  /// Estimate the overhead of scalarizing an instruction.
  InstructionCost getScalarizationOverhead(VectorType *InTy,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind) {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);

    assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
           "Vector size mismatch");

    InstructionCost Cost = 0;
    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert)
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
                                            CostKind, i, nullptr, nullptr);
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                            CostKind, i, nullptr, nullptr);
    }
    return Cost;
  }
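  // Illustrative use (hypothetical surrounding code; TTI is a
  // TargetTransformInfo and Ctx an LLVMContext): the cost of materializing
  // only lanes 0 and 2 of a <4 x i32> value.
  // \code
  //   APInt DemandedElts = APInt::getZero(4);
  //   DemandedElts.setBit(0);
  //   DemandedElts.setBit(2);
  //   InstructionCost C = TTI.getScalarizationOverhead(
  //       FixedVectorType::get(Type::getInt32Ty(Ctx), 4), DemandedElts,
  //       /*Insert=*/true, /*Extract=*/false,
  //       TargetTransformInfo::TCK_RecipThroughput);
  // \endcode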
  /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
                                           bool Extract,
                                           TTI::TargetCostKind CostKind) {
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);

    APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                             CostKind);
  }
    assert(Args.size() == Tys.size() && "Expected matching Args and Tys");

    InstructionCost Cost = 0;
    SmallPtrSet<const Value *, 4> UniqueOperands;
    for (int I = 0, E = Args.size(); I != E; I++) {
      // Disregard things like metadata arguments.
      const Value *A = Args[I];
      Type *Ty = Tys[I];
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;

      // Each unique non-constant vector operand costs one extract per lane.
      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        if (auto *VecTy = dyn_cast<VectorType>(Ty))
          Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
                                           /*Extract*/ true, CostKind);
      }
    }
    return Cost;
      // getTypeLegalizationCost: stop once legalization no longer changes the
      // type (e.g. f128), returning the accumulated split cost.
      if (MTy == LK.second)
        return std::make_pair(Cost, MTy.getSimpleVT());
895 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
896 const Instruction *CxtI =
nullptr) {
898 const TargetLoweringBase *TLI = getTLI();
899 int ISD = TLI->InstructionOpcodeToISD(Opcode);
900 assert(ISD &&
"Invalid opcode");
913 InstructionCost OpCost = (IsFloat ? 2 : 1);
915 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
918 return LT.first * OpCost;
921 if (!TLI->isOperationExpand(ISD,
LT.second)) {
924 return LT.first * 2 * OpCost;
936 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
937 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
938 DivOpc, Ty,
CostKind, Opd1Info, Opd2Info);
939 InstructionCost MulCost =
940 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty,
CostKind);
941 InstructionCost SubCost =
942 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty,
CostKind);
943 return DivCost + MulCost + SubCost;
948 if (isa<ScalableVectorType>(Ty))
954 if (
auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
955 InstructionCost
Cost = thisT()->getArithmeticInstrCost(
960 SmallVector<Type *> Tys(
Args.size(), Ty);
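  // Illustrative IR view of the rem expansion costed above (sketch, not
  // emitted here): X % Y becomes X - (X / Y) * Y, so the estimate sums one
  // divide, one multiply and one subtract:
  //   %q = udiv i32 %x, %y
  //   %p = mul  i32 %q, %y
  //   %r = sub  i32 %x, %p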
    // improveShuffleKindFromMask: refine a generic shuffle kind from the
    // constant mask, e.g. recognizing subvector extracts and inserts.
    if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
        (Index + Mask.size()) <= (size_t)NumSrcElts) {
      SubTy = FixedVectorType::get(Ty->getElementType(), Mask.size());
      return TTI::SK_ExtractSubvector;
    }
    // ...
    int NumSubElts;
    if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask(
                               Mask, NumSrcElts, NumSubElts, Index)) {
      if (Index + NumSubElts > NumSrcElts)
        return Kind;
      SubTy = FixedVectorType::get(Ty->getElementType(), NumSubElts);
      return TTI::SK_InsertSubvector;
    }
    // ...

    // getShuffleCost: dispatch on the (possibly improved) shuffle kind.
    switch (improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp)) {
    case TTI::SK_Broadcast:
      if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
        return getBroadcastShuffleOverhead(FVT, CostKind);
      return InstructionCost::getInvalid();
    case TTI::SK_Select:
    case TTI::SK_Splice:
    case TTI::SK_Reverse:
    case TTI::SK_Transpose:
    case TTI::SK_PermuteSingleSrc:
    case TTI::SK_PermuteTwoSrc:
      if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
        return getPermuteShuffleOverhead(FVT, CostKind);
      return InstructionCost::getInvalid();
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(Tp, CostKind, Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(Tp, CostKind, Index,
                                        cast<FixedVectorType>(SubTp));
    }
    llvm_unreachable("Unknown TTI::ShuffleKind");
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<InstructionCost, MVT> SrcLT =
        thisT()->getTypeLegalizationCost(Src);
    std::pair<InstructionCost, MVT> DstLT =
        thisT()->getTypeLegalizationCost(Dst);

    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      // Check for NOOP conversions.
      if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::BitCast:
      // Bitcasts between types that legalize to the same type are free, and
      // int to/from ptr of the same size is also free.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      // A zext/sext of a load is free if the corresponding extending load
      // exists on the target and the result type is legal.
      if (CCH == TTI::CastContextHint::Normal) {
        EVT ExtVT = EVT::getEVT(Dst);
        EVT LoadVT = EVT::getEVT(Src);
        unsigned LType =
            ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
        if (DstLT.first == SrcLT.first &&
            TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
          return 0;
      }
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }

    auto *SrcVTy = dyn_cast<VectorType>(Src);
    auto *DstVTy = dyn_cast<VectorType>(Dst);

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return SrcLT.first;

    // Handle scalar conversions: legal casts cost 1, illegal ones are
    // assumed expensive.
    if (!SrcVTy && !DstVTy)
      return TLI->isOperationExpand(ISD, DstLT.second) ? 4 : 1;

    // Check vector-to-vector casts.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, the check is simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first;
        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;
        // If the operation is legal, it costs 1 per legalized part.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the
      // cost of casting the original vector twice, plus the split itself.
      bool SplitSrc =
          TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
          TargetLowering::TypeSplitVector;
      bool SplitDst =
          TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
          TargetLowering::TypeSplitVector;
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
          DstVTy->getElementCount().isVector()) {
        Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
        Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
        T *TTI = static_cast<T *>(this);
        // If both types need to be split then the split is free.
        InstructionCost SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
                                          CostKind, I));
      }

      // Scalarization cost is Invalid, can't assume any num elements.
      if (isa<ScalableVectorType>(DstVTy))
        return InstructionCost::getInvalid();

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized: the cost of the scalar casts plus
      // the overhead of inserting and extracting the values.
      unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind,
          I);
      return getScalarizationOverhead(DstVTy, /*Insert*/ true,
                                      /*Extract*/ true, CostKind) +
             Num * Cost;
    }
    // Bitcasts between a vector and a scalar are assumed to be scalarized by
    // storing and reloading through a stack slot.
    if (Opcode == Instruction::BitCast)
      return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
                                                /*Extract*/ true, CostKind)
                     : 0) +
             (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
                                                /*Extract*/ false, CostKind)
                     : 0);

    llvm_unreachable("Unhandled cast");
  }

  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy, unsigned Index) {
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       TTI::TCK_RecipThroughput, Index,
                                       nullptr, nullptr) +
           thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
                                     TTI::CastContextHint::None,
                                     TTI::TCK_RecipThroughput);
  }
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                     Type *CondTy, CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<InstructionCost, MVT> LT =
        thisT()->getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1, times the
      // type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the op is scalarized.
    if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
      if (isa<ScalableVectorType>(ValTy))
        return InstructionCost::getInvalid();

      unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      InstructionCost Cost = thisT()->getCmpSelInstrCost(
          Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);

      // The scalar invocations plus the cost of inserting the results.
      return getScalarizationOverhead(ValVTy, /*Insert*/ true,
                                      /*Extract*/ false, CostKind) +
             Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }
  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) {
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
    if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    }
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index,
                                       Op0, Op1);
  }
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind) {
    assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
           "Unexpected size of DemandedDstElts.");

    InstructionCost Cost;
    auto *SrcVT = FixedVectorType::get(EltTy, VF);
    auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);

    // Model the replication as extracting every demanded source lane and
    // inserting it at each of its replica positions in the wide vector.
    APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
                                              /*Insert*/ false,
                                              /*Extract*/ true, CostKind);
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
                                              /*Insert*/ true,
                                              /*Extract*/ false, CostKind);
    return Cost;
  }
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue,
                                                  TTI::OP_None},
                  const Instruction *I = nullptr) {
    assert(!Src->isVoidTy() && "Invalid type");
    // Assume types, such as structs, are expensive.
    if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
      return 4;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Src);

    // Assuming that all loads of legal types cost 1.
    InstructionCost Cost = LT.first;
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost;

    if (Src->isVectorTy() &&
        TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
                            LT.second.getSizeInBits())) {
      // This is a vector load/store that legalizes to a larger type than the
      // vector itself. Unless the corresponding extending load or truncating
      // store is legal, it will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // Account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(
            cast<VectorType>(Src), Opcode != Instruction::Store,
            Opcode == Instruction::Store, CostKind);
      }
    }

    return Cost;
  }
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
                                       CostKind);
  }

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                       true, CostKind);
  }

  InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) {
    // For a target without strided memory operations (or an illegal type on
    // one that has them), assume we lower to a gather/scatter operation,
    // which may in turn be scalarized.
    return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                           Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(VecTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(VecTy);

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);

    // Firstly, the cost of the load/store operation.
    InstructionCost Cost;
    if (UseMaskForCond || UseMaskForGaps)
      Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
                                            AddressSpace, CostKind);
    else
      Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
                                      CostKind);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Scale the cost of the load by the fraction of legalized instructions
    // that will actually be used.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
      // The number of loads of a legal type it takes to represent a load of
      // the unlegalized vector type.
      unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
    }

    // Then, plus the cost of the interleave operation itself.
    assert(Indices.size() <= Factor &&
           "Interleaved memory op has too many members");

    const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
    const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);

    APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    if (Opcode == Instruction::Load) {
      // The interleave cost is modeled as extracting each member's elements
      // from the wide vector and inserting them into the narrow sub vectors.
      InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts,
          /*Insert*/ true, /*Extract*/ false, CostKind);
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ false,
                                                /*Extract*/ true, CostKind);
    } else {
      // Conversely, a store extracts from the sub vectors and inserts into
      // the wide vector.
      InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts,
          /*Insert*/ false, /*Extract*/ true, CostKind);
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ true,
                                                /*Extract*/ false, CostKind);
    }

    if (!UseMaskForCond)
      return Cost;

    Type *I8Type = Type::getInt8Ty(VT->getContext());

    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
        CostKind);

    // The Gaps mask is invariant and created outside the loop, so its
    // creation cost is not counted here; a gapped access additionally needs
    // an AND with the gaps mask to clear the inactive lanes.
    if (UseMaskForGaps) {
      auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
                                              CostKind);
    }

    return Cost;
  }
  /// Get intrinsic cost based on arguments.
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) {
    // ...
    ElementCount RetVF =
        (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
                             : ElementCount::getFixed(1));
    case Intrinsic::powi:
      if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          // Cost the expansion as a repeated-squaring ladder of multiplies.
          APInt Exponent = RHSC->getValue().abs();
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
          InstructionCost Cost = (ActiveBits + PopCount - 2) *
                                 thisT()->getArithmeticInstrCost(
                                     Instruction::FMul, RetTy, CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
                                                    CostKind);
          return Cost;
        }
      }
      break;
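      // Worked example (illustrative): powi(x, 11) has exponent 0b1011, so
      // ActiveBits = 4 and PopCount = 3, giving 4 + 3 - 2 = 5 fmuls: three
      // squarings (x^2, x^4, x^8) plus two products folding in the set bits.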
    case Intrinsic::cttz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;
    case Intrinsic::ctlz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;
    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());
    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Store,
                                             ICA.getArgTypes()[0], Args[1],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vp_strided_store: {
      const Value *Data = Args[0];
      const Value *Ptr = Args[1];
      const Value *Mask = Args[3];
      const Value *EVL = Args[4];
      bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getStridedMemoryOpCost(Instruction::Store,
                                             Data->getType(), Ptr, VarMask,
                                             Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vp_strided_load: {
      const Value *Ptr = Args[0];
      const Value *Mask = Args[2];
      const Value *EVL = Args[3];
      bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_stepvector: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      // The cost of materialising a constant integer vector.
      return TargetTransformInfo::TCC_Basic;
    }
    case Intrinsic::vector_extract: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
          std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_insert: {
      if (isa<ScalableVectorType>(Args[1]->getType()))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
          std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
    }
    case Intrinsic::experimental_vector_reverse: {
      return thisT()->getShuffleCost(
          TTI::SK_Reverse, cast<VectorType>(Args[0]->getType()), std::nullopt,
          CostKind, 0, cast<VectorType>(RetTy));
    }
    case Intrinsic::experimental_vector_splice: {
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_Splice, cast<VectorType>(Args[0]->getType()), std::nullopt,
          CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
      IntrinsicCostAttributes Attrs(
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
      const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
      const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
      const TTI::OperandValueInfo OpInfoBW =
          {TTI::OK_UniformConstantValue,
           isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
                                                       : TTI::OP_None};

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy,
                                              CostKind, OpInfoX,
                                              {OpInfoZ.Kind, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                              CostKind, OpInfoY,
                                              {OpInfoZ.Kind, TTI::OP_None});
      // Non-constant shift amounts require a modulo.
      if (!OpInfoZ.isConstant())
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
                                                CostKind, OpInfoZ, OpInfoBW);
      // For non-rotates (X != Y) we must add the shift-by-zero handling.
      if (X != Y) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost +=
            thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                        CmpInst::ICMP_EQ, CostKind);
        Cost +=
            thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                        CmpInst::ICMP_EQ, CostKind);
      }
      return Cost;
    }
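    // Illustrative 32-bit expansion matching the formula above (sketch):
    //   %a = urem i32 %z, 32      ; only when %z is not a constant
    //   %s = sub  i32 32, %a
    //   %h = shl  i32 %x, %a
    //   %l = lshr i32 %y, %s
    //   %r = or   i32 %h, %l
    // plus one icmp/select pair when %x != %y to guard the zero-shift case.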
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);

      // If we're not expanding the intrinsic then assume it is cheap.
      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType))
        return getTypeLegalizationCost(RetTy).first;

      // Otherwise cost the expansion: a saturating subtract followed by an
      // unsigned compare against the trip count.
      Type *ExpRetTy = VectorType::get(
          ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
      IntrinsicCostAttributes Attrs(Intrinsic::usub_sat, ExpRetTy, {ExpRetTy});
      InstructionCost Cost =
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy,
                                          RetTy, CmpInst::ICMP_ULT, CostKind);
      return Cost;
    }
    // VP intrinsics should have the same cost as their non-vp counterpart.
    if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
      std::optional<unsigned> FOp =
          VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
      if (FOp) {
        if (ICA.getID() == Intrinsic::vp_load) {
          Align Alignment;
          if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
            Alignment = VPI->getPointerAlignment().valueOrOne();
          unsigned AS = 0;
          if (auto *PtrTy =
                  dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
            AS = PtrTy->getAddressSpace();
          return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
                                          AS, CostKind);
        }
        if (ICA.getID() == Intrinsic::vp_store) {
          Align Alignment;
          if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
            Alignment = VPI->getPointerAlignment().valueOrOne();
          unsigned AS = 0;
          if (auto *PtrTy =
                  dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
            AS = PtrTy->getAddressSpace();
          return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
                                          AS, CostKind);
        }
        if (VPBinOpIntrinsic::isVPBinOp(ICA.getID()))
          return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
                                                 CostKind);
      }

      std::optional<Intrinsic::ID> FID =
          VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
      if (FID) {
        // The non-vp version has the same args/types minus mask and EVL.
        assert(ICA.getArgs().size() >= 2 && ICA.getArgTypes().size() >= 2 &&
               "Expected VPIntrinsic to have Mask and Vector Length args and "
               "types");
        ArrayRef<Type *> NewTys = ArrayRef(ICA.getArgTypes()).drop_back(2);

        // VP reductions have a start value argument that their non-vp
        // counterparts lack, except for the fadd and fmul reductions.
        if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
            *FID != Intrinsic::vector_reduce_fadd &&
            *FID != Intrinsic::vector_reduce_fmul)
          NewTys = NewTys.drop_front();

        IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
                                       ICA.getFlags());
        return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
      }
    }
    // Assume that we need to scalarize this intrinsic: the cost of inserting
    // the result lanes plus extracting every vector operand.
    InstructionCost ScalarizationCost = InstructionCost::getInvalid();
    if (RetVF.isVector() && !RetVF.isScalable()) {
      ScalarizationCost = 0;
      if (!RetTy->isVoidTy())
        ScalarizationCost += getScalarizationOverhead(
            cast<VectorType>(RetTy), /*Insert*/ true, /*Extract*/ false,
            CostKind);
      ScalarizationCost +=
          getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
    }

    IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
                                  ScalarizationCost);
    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  }
  /// Get intrinsic cost based on argument types.
  InstructionCost
  getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind) {
    // ...
    // For reductions, locate the vector operand among the argument types;
    // fadd/fmul reductions carry a scalar start value at index 0.
    unsigned VecTyIndex = 0;
    if (IID == Intrinsic::vector_reduce_fadd ||
        IID == Intrinsic::vector_reduce_fmul)
      VecTyIndex = 1;
    assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
    VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
    // ...
    switch (IID) {
    default: {
      // Scalable vectors cannot be scalarized, so this default path is
      // Invalid for them.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();
      InstructionCost ScalarizationCost =
          SkipScalarizationCost ? ScalarizationCostPassed : 0;

      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
        if (!SkipScalarizationCost)
          ScalarizationCost = getScalarizationOverhead(
              RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
        ScalarCalls = std::max(
            ScalarCalls, cast<FixedVectorType>(RetVTy)->getNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!SkipScalarizationCost)
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(
              ScalarCalls, cast<FixedVectorType>(VTy)->getNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Map each intrinsic to its ISD opcode so legality and cost can be
    // queried from TargetLowering.
    case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
    case Intrinsic::sin:       ISD = ISD::FSIN;       break;
    case Intrinsic::cos:       ISD = ISD::FCOS;       break;
    case Intrinsic::exp:       ISD = ISD::FEXP;       break;
    case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
    case Intrinsic::exp10:     ISD = ISD::FEXP10;     break;
    case Intrinsic::log:       ISD = ISD::FLOG;       break;
    case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
    case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
    case Intrinsic::fabs:      ISD = ISD::FABS;       break;
    case Intrinsic::canonicalize: ISD = ISD::FCANONICALIZE; break;
    case Intrinsic::minnum:    ISD = ISD::FMINNUM;    break;
    case Intrinsic::maxnum:    ISD = ISD::FMAXNUM;    break;
    case Intrinsic::minimum:   ISD = ISD::FMINIMUM;   break;
    case Intrinsic::maximum:   ISD = ISD::FMAXIMUM;   break;
    case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
    case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
    case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
    case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
    case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
    case Intrinsic::rint:      ISD = ISD::FRINT;      break;
    case Intrinsic::lrint:     ISD = ISD::LRINT;      break;
    case Intrinsic::llrint:    ISD = ISD::LLRINT;     break;
    case Intrinsic::round:     ISD = ISD::FROUND;     break;
    case Intrinsic::roundeven: ISD = ISD::FROUNDEVEN; break;
    case Intrinsic::pow:       ISD = ISD::FPOW;       break;
    case Intrinsic::fma:       ISD = ISD::FMA;        break;
    case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISD = ISD::STRICT_FMA;
      break;
    // These intrinsics generate no code.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      Type *Ty = Tys[0];
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::masked_load: {
      Type *Ty = RetTy;
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
          CostKind);
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
      return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
                                             VecOpTy, ICA.getFlags(), CostKind);
    case Intrinsic::abs: {
      // abs(X) = select(icmp(X,0),X,sub(0,X))
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                          CondTy, Pred, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Sub, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      return Cost;
    }
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin: {
      // minmax(X,Y) = select(icmp(X,Y),X,Y)
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
      CmpInst::Predicate Pred =
          IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                          CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;

      // SatMax -> Overflow && SumDiff < 0
      // SatMin -> Overflow && SumDiff >= 0
      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;

      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                      CmpInst::BAD_ICMP_PREDICATE, CostKind);
      return Cost;
    }
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      // Cost the expansion: extend both operands to twice the width, multiply,
      // then shift/truncate the product back down.
      unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
      Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);

      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::LShr, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::Shl, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      // Add: Overflow -> (Result < LHS) ^ (RHS < 0)
      // Sub: Overflow -> (Result < LHS) ^ (RHS > 0)
      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(
                      Instruction::ICmp, SumTy, OverflowTy,
                      CmpInst::ICMP_SGT, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
                                              CostKind);
      return Cost;
    }
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
                                    ? CmpInst::ICMP_ULT
                                    : CmpInst::ICMP_UGT;

      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
                                      Pred, CostKind);
      return Cost;
    }
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      Type *MulTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
      Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
      bool IsSigned = IID == Intrinsic::smul_with_overflow;

      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::LShr, ExtTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});

      if (IsSigned)
        Cost += thisT()->getArithmeticInstrCost(
            Instruction::AShr, MulTy, CostKind,
            {TTI::OK_UniformConstantValue, TTI::OP_None});

      Cost += thisT()->getCmpSelInstrCost(
          BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
      return Cost;
    }
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      if (Tys.empty())
        break;
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;

      // Cost the expansion: clamp with minnum/maxnum, convert, and for the
      // signed case select away NaN inputs.
      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
          TTI::CastContextHint::None, CostKind);
      if (IsSigned) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO,
            CostKind);
      }
      return Cost;
    }
    case Intrinsic::ctpop:
      ISD = ISD::CTPOP;
      // Legalized ctpop is cheaper than a libcall but not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    case Intrinsic::ctlz:       ISD = ISD::CTLZ;       break;
    case Intrinsic::cttz:       ISD = ISD::CTTZ;       break;
    case Intrinsic::bswap:      ISD = ISD::BSWAP;      break;
    case Intrinsic::bitreverse: ISD = ISD::BITREVERSE; break;
    const TargetLoweringBase *TLI = getTLI();
    std::pair<InstructionCost, MVT> LT =
        thisT()->getTypeLegalizationCost(RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
          TLI->isFAbsFree(LT.second)) {
        return 0;
      }

      // The operation is legal. Assume it costs 1, with some overhead if the
      // type is split into multiple registers.
      if (LT.first > 1)
        return (LT.first * 2);
      else
        return (LT.first * 1);
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, assume the code is twice as
      // expensive.
      return (LT.first * 2);
    }

    // If we can't lower fmuladd into an FMA, estimate the cost as a floating
    // point multiplication followed by a floating point addition.
    if (IID == Intrinsic::fmuladd)
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    if (IID == Intrinsic::experimental_constrained_fmuladd) {
      IntrinsicCostAttributes FMulAttrs(
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }
    // Else, assume that we need to scalarize this intrinsic. For math
    // builtins this will emit a costly libcall, adding call overhead and
    // spills, so make it very expensive.
    if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          SkipScalarizationCost
              ? ScalarizationCostPassed
              : getScalarizationOverhead(RetVTy, /*Insert*/ true,
                                         /*Extract*/ false, CostKind);

      unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys,
                                    FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
          if (!ICA.skipScalarizationCost())
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(
              ScalarCalls, cast<FixedVectorType>(VTy)->getNumElements());
        }
      }
      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  }
  unsigned getNumberOfParts(Type *Tp) {
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Tp);
    return LT.first.isValid() ? *LT.first.getValue() : 0;
  }
  /// Try to calculate arithmetic and shuffle op costs for reduction
  /// intrinsics, assuming a log2(N)-deep tree of "shuffle the high half down,
  /// then op" steps that ends with the result in lane 0.
  InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
                                       TTI::TargetCostKind CostKind) {
    // Targets must supply their own value for scalable vectors: the lane
    // count is unknown here.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
        ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
        NumVecElts >= 2) {
      // i1 or/and reductions are a bitcast to iN plus a single compare
      // against zero / all-ones.
      Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
                                         CmpInst::makeCmpResultType(ValTy),
                                         CmpInst::BAD_ICMP_PREDICATE,
                                         CostKind);
    }
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost ArithCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    // While the vector is longer than a legal register, halve it with an
    // extract-subvector shuffle plus one op on the half-width type.
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      ShuffleCost +=
          thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
                                  CostKind, NumVecElts, SubTy);
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;
    // The remaining levels stay at the legal width: one permute and one op
    // per level, plus a final extract of lane 0.
    ShuffleCost +=
        NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                                 std::nullopt, CostKind, 0, Ty);
    ArithCost +=
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }
  /// Try to calculate the cost of a strict (in-order) reduction: a sequence
  /// of scalar operations on elements extracted one by one.
  InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
                                          TTI::TargetCostKind CostKind) {
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    auto *VTy = cast<FixedVectorType>(Ty);
    InstructionCost ExtractCost = getScalarizationOverhead(
        VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
    InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
        Opcode, VTy->getElementType(), CostKind);
    ArithCost *= VTy->getNumElements();

    return ExtractCost + ArithCost;
  }
2481 std::optional<FastMathFlags> FMF,
2483 assert(Ty &&
"Unknown reduction vector type");
2496 if (isa<ScalableVectorType>(Ty))
2500 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2501 unsigned NumReduxLevels =
Log2_32(NumVecElts);
2504 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2505 unsigned LongVectorCount = 0;
2507 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2508 while (NumVecElts > MVTLen) {
2522 NumReduxLevels -= LongVectorCount;
2535 return ShuffleCost + MinMaxCost +
2536 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2548 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF,
CostKind);
2550 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2553 return RedCost + ExtCost;
2564 Instruction::Add, ExtTy, std::nullopt,
CostKind);
2566 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2570 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy,
CostKind);
2572 return RedCost + MulCost + 2 * ExtCost;
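  // Illustrative decomposition (per the comments above): a dot-product style
  // reduction of <N x i8> operands into i32 is costed as two extends (one per
  // operand), one widened multiply, and one add-reduction over the extended
  // type, i.e. RedCost + MulCost + 2 * ExtCost.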