#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
T *thisT() { return static_cast<T *>(this); }
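// Editor's note: the following is an illustrative, self-contained sketch of
// the CRTP pattern that thisT() implements; it is not part of BasicTTIImpl.h.
// The base class casts itself to the derived target type so that a target's
// overrides are found at compile time, without virtual dispatch.
#include <iostream>

template <typename T> struct CostModelBase {
  T *thisT() { return static_cast<T *>(this); }
  // Calls through thisT() pick up the most derived implementation.
  int instructionCost() { return thisT()->baseCost() * 2; }
  int baseCost() { return 1; } // generic default
};

struct MyTargetCostModel : CostModelBase<MyTargetCostModel> {
  int baseCost() { return 3; } // hypothetical target-specific override
};

int main() {
  MyTargetCostModel M;
  std::cout << M.instructionCost() << "\n"; // prints 6, not 2
}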
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
       "Can only extract subvectors from vectors");
assert((!isa<FixedVectorType>(VTy) ||
        (Index + NumSubElts) <=
            (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
       "SK_ExtractSubvector index out of range");
for (int i = 0; i != NumSubElts; ++i) {
  thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
  Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
       "Can only insert subvectors into vectors");
assert((!isa<FixedVectorType>(VTy) ||
        (Index + NumSubElts) <=
            (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
       "SK_InsertSubvector index out of range");
for (int i = 0; i != NumSubElts; ++i) {
  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
  thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
                              i + Index, nullptr, nullptr);
return static_cast<const T *>(this)->getST();
return static_cast<const T *>(this)->getTLI();
bool IsGatherScatter,
if (isa<ScalableVectorType>(DataTy))
auto *VT = cast<FixedVectorType>(DataTy);
VT->getNumElements()),
VT->getNumElements() *
Opcode == Instruction::Store, CostKind);
VT->getNumElements() *
Instruction::ExtractElement,
VT->getNumElements()),
return LoadCost + PackingCost + ConditionalCost;
unsigned *Fast) const {
std::pair<const Value *, unsigned>
bool HasBaseReg, int64_t Scale,
Type *ScalarValTy) const {
auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
while (VF > 2 && IsSupportedByTarget(VF))
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) {
unsigned &JumpTableSize,
unsigned N = SI.getNumCases();
bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
APInt MinCaseVal = MaxCaseVal;
for (auto CI : SI.cases()) {
  const APInt &CaseVal = CI.getCaseValue()->getValue();
  if (CaseVal.sgt(MaxCaseVal))
    MaxCaseVal = CaseVal;
  if (CaseVal.slt(MinCaseVal))
    MinCaseVal = CaseVal;
for (auto I : SI.cases())
  Dests.insert(I.getCaseSuccessor());
if (N < 2 || N < TLI->getMinimumJumpTableEntries())
(MaxCaseVal - MinCaseVal).getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
JumpTableSize = Range;
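// Editor's note: an illustrative sketch (hypothetical case values, not LLVM
// code) of the range computation above: a jump table needs one slot for every
// value between the smallest and largest case, inclusive, so its size is
// (MaxCaseVal - MinCaseVal) + 1.
#include <cstdint>
#include <iostream>

int main() {
  int64_t MinCaseVal = -2, MaxCaseVal = 13; // hypothetical switch cases
  uint64_t Range = static_cast<uint64_t>(MaxCaseVal - MinCaseVal) + 1;
  uint64_t JumpTableSize = Range; // one entry per representable case value
  std::cout << "jump table entries: " << JumpTableSize << "\n"; // 16
}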
if (!TM.isPositionIndependent())
Triple TargetTriple = TM.getTargetTriple();
else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
  MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
<< "advising against unrolling the loop because it "
std::optional<Value *>
bool &KnownBitsComputed) {
IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
virtual std::optional<unsigned>
return std::optional<unsigned>(
virtual std::optional<unsigned>
std::optional<unsigned> TargetResult =
unsigned NumStridedMemAccesses,
unsigned NumPrefetches,
bool HasCall) const {
NumPrefetches, HasCall);
const APInt &DemandedElts,
bool Insert, bool Extract,
if (isa<ScalableVectorType>(InTy))
auto *Ty = cast<FixedVectorType>(InTy);
"Vector size mismatch");
for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
  if (!DemandedElts[i])
  Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
if (isa<ScalableVectorType>(InTy))
auto *Ty = cast<FixedVectorType>(InTy);
return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
for (int I = 0, E = Args.size(); I != E; I++) {
  if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
    if (auto *VecTy = dyn_cast<VectorType>(Ty))
if (MTy == LK.second)
ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
const Instruction *CxtI = nullptr) {
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
InstructionCost OpCost = (IsFloat ? 2 : 1);
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
  return LT.first * OpCost;
if (!TLI->isOperationExpand(ISD, LT.second)) {
  return LT.first * 2 * OpCost;
unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
InstructionCost DivCost = thisT()->getArithmeticInstrCost(
    DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
InstructionCost MulCost =
    thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
InstructionCost SubCost =
    thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
return DivCost + MulCost + SubCost;
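// Editor's note: a small standalone check (not LLVM code) of the identity the
// cost model relies on when it prices srem/urem as div + mul + sub on targets
// without a native remainder instruction: a % b == a - (a / b) * b.
#include <cassert>

int main() {
  for (int a = -20; a <= 20; ++a)
    for (int b : {1, 2, 3, 7, -5})
      assert(a % b == a - (a / b) * b); // holds for C++ truncating division
}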
if (isa<ScalableVectorType>(Ty))
if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
  InstructionCost Cost = thisT()->getArithmeticInstrCost(
  SmallVector<Type *> Tys(Args.size(), Ty);
int Limit = Mask.size() * 2;
any_of(Mask, [Limit](int I) { return I >= Limit; }))
Mask, Mask.size(), NumSubElts, Index)) {
if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
  return getBroadcastShuffleOverhead(FVT, CostKind);
if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
  return getPermuteShuffleOverhead(FVT, CostKind);
cast<FixedVectorType>(SubTp));
cast<FixedVectorType>(SubTp));
assert(ISD && "Invalid opcode");
TypeSize SrcSize = SrcLT.second.getSizeInBits();
TypeSize DstSize = DstLT.second.getSizeInBits();
bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
case Instruction::Trunc:
case Instruction::BitCast:
  if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
case Instruction::FPExt:
  if (I && getTLI()->isExtFree(I))
case Instruction::ZExt:
  if (TLI->isZExtFree(SrcLT.second, DstLT.second))
case Instruction::SExt:
  if (I && getTLI()->isExtFree(I))
  if (DstLT.first == SrcLT.first &&
case Instruction::AddrSpaceCast:
  Dst->getPointerAddressSpace()))
auto *SrcVTy = dyn_cast<VectorType>(Src);
auto *DstVTy = dyn_cast<VectorType>(Dst);
if (SrcLT.first == DstLT.first &&
if (!SrcVTy && !DstVTy) {
if (DstVTy && SrcVTy) {
  if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
    if (Opcode == Instruction::ZExt)
    if (Opcode == Instruction::SExt)
      return SrcLT.first * 2;
  return SrcLT.first * 1;
if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
    DstVTy->getElementCount().isVector()) {
  T *TTI = static_cast<T *>(this);
  (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
if (isa<ScalableVectorType>(DstVTy))
unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
if (Opcode == Instruction::BitCast) {
return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
assert(ISD && "Invalid opcode");
assert(CondTy && "CondTy must exist");
if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
  return LT.first * 1;
if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
  if (isa<ScalableVectorType>(ValTy))
  unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
  Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
Value *Op0 = nullptr;
Value *Op1 = nullptr;
if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
  Op0 = IE->getOperand(0);
  Op1 = IE->getOperand(1);
return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
const APInt &DemandedDstElts,
       "Unexpected size of DemandedDstElts.");
Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
assert(!Src->isVoidTy() && "Invalid type");
if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
    LT.second.getSizeInBits())) {
if (Opcode == Instruction::Store)
    cast<VectorType>(Src), Opcode != Instruction::Store,
    Opcode == Instruction::Store, CostKind);
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
const Value *Ptr, bool VariableMask,
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
bool UseMaskForCond = false, bool UseMaskForGaps = false) {
if (isa<ScalableVectorType>(VecTy))
auto *VT = cast<FixedVectorType>(VecTy);
unsigned NumElts = VT->getNumElements();
assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
unsigned NumSubElts = NumElts / Factor;
if (UseMaskForCond || UseMaskForGaps)
  Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
BitVector UsedInsts(NumLegalInsts, false);
for (unsigned Index : Indices)
  for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
    UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
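// Editor's note: an illustrative sketch (hypothetical sizes, std::vector<bool>
// in place of llvm::BitVector) of the index math above: it marks which of the
// legalized loads/stores backing the wide vector are actually touched by the
// interleave-group members in Indices, so the untouched ones can be treated as
// dead when costing the operation.
#include <iostream>
#include <vector>

int main() {
  unsigned Factor = 4, NumSubElts = 4;   // a <16 x i32> group with 4 members
  unsigned NumLegalInsts = 8;            // assume legalization splits into 8 ops
  unsigned NumEltsPerLegalInst = 2;      // 16 elements / 8 legal ops
  std::vector<bool> UsedInsts(NumLegalInsts, false);
  for (unsigned Index : {0u})            // only member 0 of the group is used
    for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
      UsedInsts[(Index + Elt * Factor) / NumEltsPerLegalInst] = true;
  for (unsigned i = 0; i < NumLegalInsts; ++i)
    std::cout << "legal op " << i << (UsedInsts[i] ? ": used\n" : ": dead\n");
}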
1414 "Interleaved memory op has too many members");
1420 for (
unsigned Index : Indices) {
1421 assert(
Index < Factor &&
"Invalid index for interleaved memory op");
1422 for (
unsigned Elm = 0; Elm < NumSubElts; Elm++)
1423 DemandedLoadStoreElts.
setBit(
Index + Elm * Factor);
1426 if (Opcode == Instruction::Load) {
1436 SubVT, DemandedAllSubElts,
1438 Cost += Indices.
size() * InsSubCost;
1439 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1457 SubVT, DemandedAllSubElts,
1459 Cost += ExtSubCost * Indices.
size();
1460 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1465 if (!UseMaskForCond)
1470 Cost += thisT()->getReplicationShuffleCost(
1471 I8Type, Factor, NumSubElts,
1472 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1480 if (UseMaskForGaps) {
1482 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1507 (
RetTy->isVectorTy() ? cast<VectorType>(
RetTy)->getElementCount()
1516 case Intrinsic::powi:
1517 if (
auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1518 bool ShouldOptForSize =
I->getParent()->getParent()->hasOptSize();
1520 ShouldOptForSize)) {
1524 unsigned ActiveBits =
Exponent.getActiveBits();
1525 unsigned PopCount =
Exponent.popcount();
1527 thisT()->getArithmeticInstrCost(
1529 if (RHSC->isNegative())
1530 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv,
RetTy,
1536 case Intrinsic::cttz:
1542 case Intrinsic::ctlz:
1548 case Intrinsic::memcpy:
1549 return thisT()->getMemcpyCost(ICA.
getInst());
1551 case Intrinsic::masked_scatter: {
1552 const Value *Mask = Args[3];
1553 bool VarMask = !isa<Constant>(Mask);
1554 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
1555 return thisT()->getGatherScatterOpCost(Instruction::Store,
1559 case Intrinsic::masked_gather: {
1560 const Value *Mask = Args[2];
1561 bool VarMask = !isa<Constant>(Mask);
1562 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
1563 return thisT()->getGatherScatterOpCost(Instruction::Load,
RetTy, Args[0],
1566 case Intrinsic::experimental_stepvector: {
1567 if (isa<ScalableVectorType>(
RetTy))
1572 case Intrinsic::vector_extract: {
1575 if (isa<ScalableVectorType>(
RetTy))
1577 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1578 return thisT()->getShuffleCost(
1582 case Intrinsic::vector_insert: {
1585 if (isa<ScalableVectorType>(Args[1]->
getType()))
1587 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1588 return thisT()->getShuffleCost(
1592 case Intrinsic::experimental_vector_reverse: {
1593 return thisT()->getShuffleCost(
1597 case Intrinsic::experimental_vector_splice: {
1598 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1599 return thisT()->getShuffleCost(
1603 case Intrinsic::vector_reduce_add:
1604 case Intrinsic::vector_reduce_mul:
1605 case Intrinsic::vector_reduce_and:
1606 case Intrinsic::vector_reduce_or:
1607 case Intrinsic::vector_reduce_xor:
1608 case Intrinsic::vector_reduce_smax:
1609 case Intrinsic::vector_reduce_smin:
1610 case Intrinsic::vector_reduce_fmax:
1611 case Intrinsic::vector_reduce_fmin:
1612 case Intrinsic::vector_reduce_fmaximum:
1613 case Intrinsic::vector_reduce_fminimum:
1614 case Intrinsic::vector_reduce_umax:
1615 case Intrinsic::vector_reduce_umin: {
1619 case Intrinsic::vector_reduce_fadd:
1620 case Intrinsic::vector_reduce_fmul: {
1622 IID,
RetTy, {Args[0]->getType(), Args[1]->
getType()}, FMF,
I, 1);
case Intrinsic::fshl:
case Intrinsic::fshr: {
  const Value *X = Args[0];
  const Value *Y = Args[1];
  const Value *Z = Args[2];
  thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
  thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
  Cost += thisT()->getArithmeticInstrCost(
  Cost += thisT()->getArithmeticInstrCost(
  Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
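// Editor's note: a toy scalar expansion (not LLVM code) of fshl, mirroring the
// kinds of operations summed above for a funnel shift: urem on the shift
// amount, shl/lshr/sub/or for the shift itself, and a compare-plus-select to
// guard the shift-by-zero case.
#include <cassert>
#include <cstdint>

uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
  const uint32_t BW = 32;
  uint32_t ShAmt = Z % BW;                    // urem
  if (ShAmt == 0)                             // icmp + select in vector form
    return X;
  return (X << ShAmt) | (Y >> (BW - ShAmt));  // shl, lshr, sub, or
}

int main() {
  assert(fshl32(0x12345678u, 0x9ABCDEF0u, 8) == 0x3456789Au);
  assert(fshl32(0x12345678u, 0x9ABCDEF0u, 0) == 0x12345678u);
}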
case Intrinsic::get_active_lane_mask: {
  if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
    thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
ScalarizationCost = 0;
if (!RetTy->isVoidTy())
    cast<VectorType>(RetTy),
  ScalarizationCost +=
return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
unsigned VecTyIndex = 0;
if (IID == Intrinsic::vector_reduce_fadd ||
    IID == Intrinsic::vector_reduce_fmul)
assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
      return isa<ScalableVectorType>(Ty);
SkipScalarizationCost ? ScalarizationCostPassed : 0;
unsigned ScalarCalls = 1;
if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
  if (!SkipScalarizationCost)
  ScalarCalls = std::max(ScalarCalls,
                         cast<FixedVectorType>(RetVTy)->getNumElements());
  ScalarRetTy = RetTy->getScalarType();
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
  if (auto *VTy = dyn_cast<VectorType>(Ty)) {
    if (!SkipScalarizationCost)
    ScalarCalls = std::max(ScalarCalls,
                           cast<FixedVectorType>(VTy)->getNumElements());
if (ScalarCalls == 1)
thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
return ScalarCalls * ScalarCost + ScalarizationCost;
case Intrinsic::sqrt:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::exp10:
case Intrinsic::log:
case Intrinsic::log10:
case Intrinsic::log2:
case Intrinsic::fabs:
case Intrinsic::canonicalize:
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum:
case Intrinsic::copysign:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::trunc:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::pow:
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::experimental_constrained_fmuladd:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::masked_store: {
  Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
  return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
case Intrinsic::masked_load: {
  Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
  return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
case Intrinsic::vector_reduce_add:
  return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
case Intrinsic::vector_reduce_mul:
  return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
case Intrinsic::vector_reduce_and:
  return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
case Intrinsic::vector_reduce_or:
  return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
case Intrinsic::vector_reduce_xor:
  return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
case Intrinsic::vector_reduce_fadd:
  return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
case Intrinsic::vector_reduce_fmul:
  return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
case Intrinsic::vector_reduce_smax:
  return thisT()->getMinMaxReductionCost(Intrinsic::smax, VecOpTy,
case Intrinsic::vector_reduce_smin:
  return thisT()->getMinMaxReductionCost(Intrinsic::smin, VecOpTy,
case Intrinsic::vector_reduce_umax:
  return thisT()->getMinMaxReductionCost(Intrinsic::umax, VecOpTy,
case Intrinsic::vector_reduce_umin:
  return thisT()->getMinMaxReductionCost(Intrinsic::umin, VecOpTy,
case Intrinsic::vector_reduce_fmax:
  return thisT()->getMinMaxReductionCost(Intrinsic::maxnum, VecOpTy,
case Intrinsic::vector_reduce_fmin:
  return thisT()->getMinMaxReductionCost(Intrinsic::minnum, VecOpTy,
case Intrinsic::vector_reduce_fmaximum:
  return thisT()->getMinMaxReductionCost(Intrinsic::maximum, VecOpTy,
case Intrinsic::vector_reduce_fminimum:
  return thisT()->getMinMaxReductionCost(Intrinsic::minimum, VecOpTy,
case Intrinsic::abs: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
  Cost += thisT()->getArithmeticInstrCost(
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
      ? Intrinsic::sadd_with_overflow
      : Intrinsic::ssub_with_overflow;
      nullptr, ScalarizationCostPassed);
  Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
case Intrinsic::uadd_sat:
case Intrinsic::usub_sat: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
      ? Intrinsic::uadd_with_overflow
      : Intrinsic::usub_with_overflow;
      nullptr, ScalarizationCostPassed);
  Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::smul_fix:
case Intrinsic::umul_fix: {
  unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
  Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
      IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
  Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
  Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow: {
  Type *SumTy = RetTy->getContainedType(0);
  Type *OverflowTy = RetTy->getContainedType(1);
  unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                        ? BinaryOperator::Add
                        : BinaryOperator::Sub;
  Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
  Cost += 2 * thisT()->getCmpSelInstrCost(
                  Instruction::ICmp, SumTy, OverflowTy,
  Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
case Intrinsic::uadd_with_overflow:
case Intrinsic::usub_with_overflow: {
  Type *SumTy = RetTy->getContainedType(0);
  Type *OverflowTy = RetTy->getContainedType(1);
  unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                        ? BinaryOperator::Add
                        : BinaryOperator::Sub;
  Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
  Type *MulTy = RetTy->getContainedType(0);
  Type *OverflowTy = RetTy->getContainedType(1);
  bool IsSigned = IID == Intrinsic::smul_with_overflow;
  unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
  Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
  Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
  Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
  Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
  Cost += thisT()->getCmpSelInstrCost(
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat: {
  Type *FromTy = Tys[0];
  bool IsSigned = IID == Intrinsic::fptosi_sat;
  Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
  Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
  Cost += thisT()->getCastInstrCost(
      IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  Cost += thisT()->getCmpSelInstrCost(
  Cost += thisT()->getCmpSelInstrCost(
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::bswap:
case Intrinsic::bitreverse:
if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
  return (LT.first * 2);
  return (LT.first * 1);
  return (LT.first * 2);
if (IID == Intrinsic::fmuladd)
  return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
         thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
if (IID == Intrinsic::experimental_constrained_fmuladd) {
      Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      Intrinsic::experimental_constrained_fadd, RetTy, Tys);
  return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
         thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
  if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
        return isa<ScalableVectorType>(Ty);
  SkipScalarizationCost
      ? ScalarizationCostPassed
  unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
    if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
      ScalarCalls = std::max(ScalarCalls,
                             cast<FixedVectorType>(VTy)->getNumElements());
  return ScalarCalls * ScalarCost + ScalarizationCost;
return SingleCallCost;
return LT.first.isValid() ? *LT.first.getValue() : 0;
if (isa<ScalableVectorType>(Ty))
unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
  return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
         thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
unsigned NumReduxLevels = Log2_32(NumVecElts);
std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
unsigned LongVectorCount = 0;
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
  ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
NumReduxLevels -= LongVectorCount;
NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
return ShuffleCost + ArithCost +
       thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
if (isa<ScalableVectorType>(Ty))
auto *VTy = cast<FixedVectorType>(Ty);
return ExtractCost + ArithCost;
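// Editor's note: a toy model (not LLVM code) of the tree reduction this cost
// function assumes: log2(N) rounds of "move the upper half down and combine
// pairwise", which is why NumReduxLevels is Log2_32(NumVecElts).
#include <iostream>
#include <vector>

int main() {
  std::vector<int> V{1, 2, 3, 4, 5, 6, 7, 8};
  unsigned Levels = 0;
  for (size_t N = V.size(); N > 1; N /= 2, ++Levels)
    for (size_t i = 0; i < N / 2; ++i)
      V[i] += V[i + N / 2];  // one shuffle + one vector op per level
  std::cout << "sum = " << V[0] << ", levels = " << Levels << "\n"; // 36, 3
}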
std::optional<FastMathFlags> FMF,
assert(Ty && "Unknown reduction vector type");
if (isa<ScalableVectorType>(Ty))
unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
unsigned NumReduxLevels = Log2_32(NumVecElts);
std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
unsigned LongVectorCount = 0;
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
NumReduxLevels -= LongVectorCount;
return ShuffleCost + MinMaxCost +
       thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
return RedCost + ExtCost;
Instruction::Add, ExtTy, std::nullopt, CostKind);
IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
return RedCost + MulCost + 2 * ExtCost;