#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
  const T *thisT() const { return static_cast<const T *>(this); }
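  // thisT() is the usual CRTP downcast: BasicTTIImplBase<T> is instantiated
  // with the concrete target implementation as T, so shared cost logic can
  // reach target overrides without virtual dispatch. Illustrative sketch
  // only (MyTTIImpl is a hypothetical target, not part of this header):
  //
  //   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
  //     // overrides are found statically via thisT()->...
  //   };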
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, 0, nullptr, nullptr);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                          CostKind, i, nullptr, nullptr);

      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                          CostKind, i, nullptr, nullptr);
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                          CostKind, i, nullptr, nullptr);
137 "Can only extract subvectors from vectors");
140 (Index + NumSubElts) <=
142 "SK_ExtractSubvector index out of range");
148 for (
int i = 0; i != NumSubElts; ++i) {
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index,
nullptr,
nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
165 "Can only insert subvectors into vectors");
168 (Index + NumSubElts) <=
170 "SK_InsertSubvector index out of range");
176 for (
int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
CostKind,
181 i + Index,
nullptr,
nullptr);
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }
                                              bool IsGatherScatter,

    unsigned VF = VT->getNumElements();

        VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
                                      AddressSpace, CostKind);

            Opcode == Instruction::Store, CostKind);

          VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
                thisT()->getCFInstrCost(Instruction::PHI, CostKind));

    return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
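    // The sum above decomposes a masked/gather/scatter memory op into:
    // per-lane address extraction (gather/scatter only), VF scalar loads or
    // stores, packing/unpacking of the value vector, and, when the mask is
    // variable, a branch-and-phi pair per lane for the guarded execution.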
  static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
    bool IsCompared = false;

        return P.index() != Mask.size() - 1 || IsCompared;
      if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
        return false;

        SplatIdx = P.value();
        return P.index() != Mask.size() - 1;

      return SplatIdx == P.value();
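  // Example: for two <4 x i32> sources, mask <1, 1, 1, 1> is a splat of
  // source element 1 (Index = 1). Mask values in [NumSrcElts, 2*NumSrcElts)
  // select from the second source, which is why values >= NumSrcElts * 2 are
  // rejected above.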
  std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
      const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
      std::optional<unsigned> CallRetElementIndex = {}) const {

    EVT VT = getTLI()->getValueType(DL, Ty);

    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;

    switch (ICA.getID()) {
    case Intrinsic::modf:
    case Intrinsic::sincospi:
    case Intrinsic::sincos:

    RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
    if (LibcallImpl == RTLIB::Unsupported)
      return std::nullopt;

        VecTy, {}, CostKind, 0, nullptr, {});

      if (Idx == CallRetElementIndex)
        continue;
      Cost += thisT()->getMemoryOpCost(Instruction::Load, VectorTy,
                                      unsigned *Fast) const override {
    return getTLI()->allowsMisalignedMemoryAccesses(
        Context, BitWidth, AddressSpace, Alignment, Fast);
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return (CallerBits & CalleeBits) == CalleeBits;
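    // Inlining is compatible when the callee's subtarget features are a
    // subset of the caller's: masking with CallerBits can only clear bits,
    // so equality holds iff every callee bit is also set in the caller.
    // E.g. Caller = 0b1110, Callee = 0b0110 -> compatible;
    //      Caller = 0b1110, Callee = 0b0111 -> not.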
    return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);

    return getTLI()->getTargetMachine().getAssumedAddrSpace(V);

    return getTLI()->getTargetMachine().Options.ThreadModel ==
           ThreadModel::Single;

  std::pair<const Value *, unsigned>
    return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);

                                          Value *NewV) const override {

    return getTLI()->isLegalAddImmediate(imm);

    return getTLI()->isLegalAddScalableImmediate(Imm);

    return getTLI()->isLegalICmpImmediate(imm);
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             int64_t ScalableOffset = 0) const override {

    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);

    return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
                             Type *ScalarValTy) const override {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->isOperationCustom(ISD::STORE, VT))
        return true;

          getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
      return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
    };
    while (VF > 2 && IsSupportedByTarget(VF))
      VF /= 2;
    return VF;
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);

    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
                                       unsigned AddrSpace) const override {

    return getTLI()->isTruncateFree(Ty1, Ty2);

    return getTLI()->isProfitableToHoist(I);

  bool useAA() const override { return getST()->useAA(); }

    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isTypeLegal(VT);

    EVT ETy = getTLI()->getValueType(DL, Ty);
    return getTLI()->getNumRegisters(Ty->getContext(), ETy);
    unsigned N = SI.getNumCases();

    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    if (N <= DL.getIndexSizeInBits(0u)) {
      for (auto I : SI.cases()) {

    if (N < 2 || N < TLI->getMinimumJumpTableEntries())
      return N;
    uint64_t Range =
        (MaxCaseVal - MinCaseVal)
            .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;

    JumpTableSize = Range;
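    // Example: cases {3, 7, 10} give MinCaseVal = 3, MaxCaseVal = 10, so
    // Range = 10 - 3 + 1 = 8 and the jump table needs 8 slots even though
    // only 3 of them hold real targets.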
                                      const Function &Fn) const override {

    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem: {

    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;

        << "advising against unrolling the loop because it "
  std::optional<Instruction *>

  std::optional<Value *>
                                   bool &KnownBitsComputed) const override {

                                 SimplifyAndSetOp) const override {
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,

  std::optional<unsigned>
    return std::optional<unsigned>(

  std::optional<unsigned>
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));
    return getST()->getCacheLineSize();

    return getST()->getPrefetchDistance();

                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);

    return getST()->getMaxPrefetchIterationsAhead();

    return getST()->enableWritePrefetching();

    return getST()->shouldPrefetchAddressSpace(AS);

  std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
      VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,

    assert((VL.empty() || VL.size() == Ty->getNumElements()) &&
           "Vector size mismatch");

    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;

      Value *InsertedVal = VL.empty() ? nullptr : VL[i];
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
                                          CostKind, i, nullptr, InsertedVal);

      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                          CostKind, i, nullptr, nullptr);
    }
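    // Scalarization overhead counts one insertelement per demanded lane when
    // the result must be rebuilt (Insert) and one extractelement per demanded
    // lane when operands must be taken apart (Extract). E.g. a <4 x i32> op
    // with DemandedElts = 0b0101 and both flags set is charged 2 inserts +
    // 2 extracts.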
                                            unsigned ScalarOpdIdx) const override {

                                              int OpdIdx) const override {

                                                          int RetIdx) const override {

    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                             CostKind);

    for (Type *Ty : Tys) {
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;

        filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
    EVT MTy = getTLI()->getValueType(DL, Ty);

    if (MTy == LK.second)

                             const Instruction *CxtI = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      return LT.first * 2 * OpCost;
    }

      unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
      InstructionCost DivCost = thisT()->getArithmeticInstrCost(
          DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
      InstructionCost MulCost =
          thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
      InstructionCost SubCost =
          thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
      return DivCost + MulCost + SubCost;
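      // The expansion priced above uses the identity a % b == a - (a / b) * b,
      // so a remainder that must be expanded costs one divide, one multiply
      // and one subtract.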
    int NumDstElts = Mask.size();
    int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();

    if (isSplatMask(Mask, NumSrcElts, Index))
      return TTI::SK_Broadcast;

        (Index + NumDstElts) <= NumSrcElts) {

    if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))

            Mask, NumSrcElts, NumSubElts, Index)) {
      if (Index + NumSubElts > NumSrcElts)
                 const Instruction *CxtI = nullptr) const override {

      return getBroadcastShuffleOverhead(FVT, CostKind);

      return getPermuteShuffleOverhead(FVT, CostKind);

      return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
                                         cast<FixedVectorType>(SubTp));

      return getInsertSubvectorOverhead(DstTy, CostKind, Index,
                                        cast<FixedVectorType>(SubTp));
    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    case Instruction::Trunc:
    case Instruction::BitCast:
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
      if (DstLT.first == SrcLT.first &&
    case Instruction::AddrSpaceCast:
                                     Dst->getPointerAddressSpace()))

      if (SrcLT.first == DstLT.first &&

    if (!SrcVTy && !DstVTy) {

    if (DstVTy && SrcVTy) {
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
        if (Opcode == Instruction::ZExt)
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;
        return SrcLT.first * 1;

      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
          DstVTy->getElementCount().isKnownEven()) {
        const T *TTI = thisT();
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
            (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,

          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);

    if (Opcode == Instruction::BitCast) {

    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       CostKind, Index, nullptr, nullptr) +
                                     const Instruction *I = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

                                   Op1Info, Op2Info, I);

    assert(CondTy && "CondTy must exist");

        !TLI->isOperationExpand(ISD, LT.second)) {
      return LT.first * 1;

        Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override {

      unsigned Index, Value *Scalar,
      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const override {
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
                                       nullptr);

                                     unsigned Index) const override {
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
                                       Op1);

                                                  unsigned Index) const override {
    unsigned NewIndex = -1;
           "Unexpected index from end of vector");
      NewIndex = FVTy->getNumElements() - 1 - Index;
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
                                       nullptr);
                                              const APInt &DemandedDstElts,
           "Unexpected size of DemandedDstElts.");

    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,

    assert(!Src->isVoidTy() && "Invalid type");

                          LT.second.getSizeInBits())) {
      if (Opcode == Instruction::Store)
          Opcode == Instruction::Store, CostKind);
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
    unsigned NumSubElts = NumElts / Factor;

    if (UseMaskForCond || UseMaskForGaps) {
      unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
                                                 : Intrinsic::masked_store;
      Cost = thisT()->getMemIntrinsicInstrCost(

    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);

    if (Cost.isValid() && VecTySize > VecTyLTSize) {
      unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
      unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

           "Interleaved memory op has too many members");

    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    if (Opcode == Instruction::Load) {
          SubVT, DemandedAllSubElts,
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,

          SubVT, DemandedAllSubElts,
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,

    if (!UseMaskForCond)
      return Cost;

    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,

    if (UseMaskForGaps) {
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
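    // Interleaved layout reminder: member Index of a Factor-way group owns
    // vector lanes Index + Elm * Factor. E.g. with Factor = 3 and
    // NumSubElts = 4, member 1 occupies lanes 1, 4, 7 and 10, which is
    // exactly what the DemandedLoadStoreElts loop above marks.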
    std::optional<unsigned> FOp =
        VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());

    if (ICA.getID() == Intrinsic::vp_load) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
        AS = PtrTy->getAddressSpace();
      return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,

    if (ICA.getID() == Intrinsic::vp_store) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
        AS = PtrTy->getAddressSpace();
      return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,

        ICA.getID() == Intrinsic::vp_fneg) {
      return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),

      return thisT()->getCastInstrCost(

      return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],

    if (ICA.getID() == Intrinsic::vp_load_ff) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(

    if (ICA.getID() == Intrinsic::vp_scatter) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
                                       VarMask, Alignment, nullptr),

    if (ICA.getID() == Intrinsic::vp_gather) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
                                       VarMask, Alignment, nullptr),

    if (ICA.getID() == Intrinsic::vp_select ||
        ICA.getID() == Intrinsic::vp_merge) {

    std::optional<Intrinsic::ID> FID =
        VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
    if (ICA.getID() == Intrinsic::experimental_vp_reverse)
      FID = Intrinsic::vector_reverse;

           "Expected VPIntrinsic to have Mask and Vector Length args and "

          *FID != Intrinsic::vector_reduce_fadd &&
          *FID != Intrinsic::vector_reduce_fmul) {
      return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
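    // Fallback used above: in the absence of target-specific information, a
    // VP intrinsic is priced like its non-VP functional equivalent, e.g.
    // vp.fadd is costed as a plain fadd and experimental.vp.reverse as
    // vector.reverse.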
    case Intrinsic::powi:
      if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
          InstructionCost Cost =
              (ActiveBits + PopCount - 2) *
              thisT()->getArithmeticInstrCost(Instruction::FMul, RetTy,
                                              CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
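          // Square-and-multiply expansion: (ActiveBits - 1) squarings plus
          // (PopCount - 1) extra multiplies. E.g. powi(x, 13) with
          // 13 = 0b1101 needs 3 squarings + 2 multiplies = 5 fmuls, plus one
          // fdiv when the exponent is negative.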
    case Intrinsic::cttz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
        return TargetTransformInfo::TCC_Basic;

    case Intrinsic::ctlz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
        return TargetTransformInfo::TCC_Basic;

    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());

    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[2];
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::masked_gather: {
      const Value *Mask = Args[1];
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
                                       VarMask, Alignment, I),

    case Intrinsic::masked_compressstore: {
      const Value *Mask = Args[2];
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::masked_expandload: {
      const Value *Mask = Args[1];
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::experimental_vp_strided_store: {
      const Value *Ptr = Args[1];
      const Value *Mask = Args[3];
      const Value *EVL = Args[4];
      Align Alignment =
          I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::experimental_vp_strided_load: {
      const Value *Ptr = Args[0];
      const Value *Mask = Args[2];
      const Value *EVL = Args[3];
      Align Alignment =
          I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::stepvector: {
    case Intrinsic::vector_extract: {
    case Intrinsic::vector_insert: {
      return thisT()->getShuffleCost(
    case Intrinsic::vector_splice: {
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];

      InstructionCost Cost =
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
      Cost += thisT()->getArithmeticInstrCost(
          isPowerOf2_32(BitWidth) ? BinaryOperator::And
                                  : BinaryOperator::URem,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
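      // Expansion being priced above (BW = bit width):
      //   fshl(X, Y, Z) -> (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      //   fshr(X, Y, Z) -> (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      // hence the Or/Sub/Shl/LShr terms, a URem (or And when BW is a power
      // of two) for the modulo, and an icmp+select pair guarding Z == 0.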
    case Intrinsic::experimental_cttz_elts: {
      if (!getTLI()->shouldExpandCttzElements(ArgType))

      unsigned EltWidth = getTLI()->getBitWidthForCttzElements(

          thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
      Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
          thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
          NewEltTy, NewVecTy, FMF, I, 1);
      Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
    case Intrinsic::get_active_lane_mask:
    case Intrinsic::experimental_vector_match:
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin:
      return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
    case Intrinsic::modf:
    case Intrinsic::sincos:
    case Intrinsic::sincospi: {
      std::optional<unsigned> CallRetElementIndex;
      if (ICA.getID() == Intrinsic::modf)
        CallRetElementIndex = 0;
      if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
              ICA, CostKind, CallRetElementIndex))
        return *Cost;
    }

      ScalarizationCost = 0;

        filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),

    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    unsigned VecTyIndex = 0;
    if (IID == Intrinsic::vector_reduce_fadd ||
        IID == Intrinsic::vector_reduce_fmul)
      VecTyIndex = 1;
    assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");

        SkipScalarizationCost ? ScalarizationCostPassed : 0;
    unsigned ScalarCalls = 1;
    Type *ScalarRetTy = RetTy;
      if (!SkipScalarizationCost)
      ScalarCalls = std::max(ScalarCalls,
    for (Type *Ty : Tys) {
        if (!SkipScalarizationCost)
        ScalarCalls = std::max(ScalarCalls,
        Ty = Ty->getScalarType();
    }
    if (ScalarCalls == 1)
      return 1;

        thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
    return ScalarCalls * ScalarCost + ScalarizationCost;
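    // Scalarized intrinsic model used above: pay the insert/extract overhead
    // once (ScalarizationCost) plus one scalar call per widest-vector lane,
    // e.g. a 4-lane llvm.sin.v4f32 with no native lowering costs
    // 4 * cost(sin.f32) + scalarization.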
    case Intrinsic::sqrt:         ISD = ISD::FSQRT;      break;
    case Intrinsic::sin:          ISD = ISD::FSIN;       break;
    case Intrinsic::cos:          ISD = ISD::FCOS;       break;
    case Intrinsic::sincos:       ISD = ISD::FSINCOS;    break;
    case Intrinsic::sincospi:     ISD = ISD::FSINCOSPI;  break;
    case Intrinsic::modf:         ISD = ISD::FMODF;      break;
    case Intrinsic::tan:          ISD = ISD::FTAN;       break;
    case Intrinsic::asin:         ISD = ISD::FASIN;      break;
    case Intrinsic::acos:         ISD = ISD::FACOS;      break;
    case Intrinsic::atan:         ISD = ISD::FATAN;      break;
    case Intrinsic::atan2:        ISD = ISD::FATAN2;     break;
    case Intrinsic::sinh:         ISD = ISD::FSINH;      break;
    case Intrinsic::cosh:         ISD = ISD::FCOSH;      break;
    case Intrinsic::tanh:         ISD = ISD::FTANH;      break;
    case Intrinsic::exp:          ISD = ISD::FEXP;       break;
    case Intrinsic::exp2:         ISD = ISD::FEXP2;      break;
    case Intrinsic::exp10:        ISD = ISD::FEXP10;     break;
    case Intrinsic::log:          ISD = ISD::FLOG;       break;
    case Intrinsic::log10:        ISD = ISD::FLOG10;     break;
    case Intrinsic::log2:         ISD = ISD::FLOG2;      break;
    case Intrinsic::ldexp:        ISD = ISD::FLDEXP;     break;
    case Intrinsic::fabs:         ISD = ISD::FABS;       break;
    case Intrinsic::canonicalize: ISD = ISD::FCANONICALIZE; break;
    case Intrinsic::minnum:       ISD = ISD::FMINNUM;    break;
    case Intrinsic::maxnum:       ISD = ISD::FMAXNUM;    break;
    case Intrinsic::minimum:      ISD = ISD::FMINIMUM;   break;
    case Intrinsic::maximum:      ISD = ISD::FMAXIMUM;   break;
    case Intrinsic::minimumnum:   ISD = ISD::FMINIMUMNUM; break;
    case Intrinsic::maximumnum:   ISD = ISD::FMAXIMUMNUM; break;
    case Intrinsic::copysign:     ISD = ISD::FCOPYSIGN;  break;
    case Intrinsic::floor:        ISD = ISD::FFLOOR;     break;
    case Intrinsic::ceil:         ISD = ISD::FCEIL;      break;
    case Intrinsic::trunc:        ISD = ISD::FTRUNC;     break;
    case Intrinsic::nearbyint:    ISD = ISD::FNEARBYINT; break;
    case Intrinsic::rint:         ISD = ISD::FRINT;      break;
    case Intrinsic::lrint:        ISD = ISD::LRINT;      break;
    case Intrinsic::llrint:       ISD = ISD::LLRINT;     break;
    case Intrinsic::round:        ISD = ISD::FROUND;     break;
    case Intrinsic::roundeven:    ISD = ISD::FROUNDEVEN; break;
    case Intrinsic::lround:       ISD = ISD::LROUND;     break;
    case Intrinsic::llround:      ISD = ISD::LLROUND;    break;
    case Intrinsic::pow:          ISD = ISD::FPOW;       break;
    case Intrinsic::fma:          ISD = ISD::FMA;        break;
    case Intrinsic::fmuladd:      ISD = ISD::FMA;        break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISD = ISD::STRICT_FMA;
      break;
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::masked_load: {
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::experimental_vp_strided_store: {
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::experimental_vp_strided_load: {
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getMemIntrinsicInstrCost(

    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
      return thisT()->getMinMaxReductionCost(
    case Intrinsic::experimental_vector_match: {
      unsigned SearchSize = NeedleTy->getNumElements();

      EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
      if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))

      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
    case Intrinsic::vector_reverse:

    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin: {
      Align Alignment = thisT()->DL.getABITypeAlign(EltTy);

      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
      Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,

      case Intrinsic::experimental_vector_histogram_add:
        Cost +=
            thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
      case Intrinsic::experimental_vector_histogram_uadd_sat: {
        Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
      }
      case Intrinsic::experimental_vector_histogram_umax: {
      case Intrinsic::experimental_vector_histogram_umin: {

      Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);

      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))

      InstructionCost Cost =
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
    case Intrinsic::experimental_memset_pattern:

    case Intrinsic::abs:      ISD = ISD::ABS;     break;
    case Intrinsic::fshl:     ISD = ISD::FSHL;    break;
    case Intrinsic::fshr:     ISD = ISD::FSHR;    break;
    case Intrinsic::smax:     ISD = ISD::SMAX;    break;
    case Intrinsic::smin:     ISD = ISD::SMIN;    break;
    case Intrinsic::umax:     ISD = ISD::UMAX;    break;
    case Intrinsic::umin:     ISD = ISD::UMIN;    break;
    case Intrinsic::sadd_sat: ISD = ISD::SADDSAT; break;
    case Intrinsic::ssub_sat: ISD = ISD::SSUBSAT; break;
    case Intrinsic::uadd_sat: ISD = ISD::UADDSAT; break;
    case Intrinsic::usub_sat: ISD = ISD::USUBSAT; break;
    case Intrinsic::smul_fix: ISD = ISD::SMULFIX; break;
    case Intrinsic::umul_fix: ISD = ISD::UMULFIX; break;
    case Intrinsic::sadd_with_overflow: ISD = ISD::SADDO; break;
    case Intrinsic::ssub_with_overflow: ISD = ISD::SSUBO; break;
    case Intrinsic::uadd_with_overflow: ISD = ISD::UADDO; break;
    case Intrinsic::usub_with_overflow: ISD = ISD::USUBO; break;
    case Intrinsic::smul_with_overflow: ISD = ISD::SMULO; break;
    case Intrinsic::umul_with_overflow: ISD = ISD::UMULO; break;
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      if (!SrcLT.first.isValid() || !RetLT.first.isValid())
        return InstructionCost::getInvalid();
    }
    case Intrinsic::ctpop:      ISD = ISD::CTPOP;      break;
    case Intrinsic::ctlz:       ISD = ISD::CTLZ;       break;
    case Intrinsic::cttz:       ISD = ISD::CTTZ;       break;
    case Intrinsic::bswap:      ISD = ISD::BSWAP;      break;
    case Intrinsic::bitreverse: ISD = ISD::BITREVERSE; break;
    case Intrinsic::ucmp:       ISD = ISD::UCMP;       break;
    case Intrinsic::scmp:       ISD = ISD::SCMP;       break;
    Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;

    if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&

      return (LT.first * 2);
      return (LT.first * 1);
      return (LT.first * 2);

    case Intrinsic::fmuladd: {
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    }
    case Intrinsic::experimental_constrained_fmuladd: {
      IntrinsicCostAttributes FMulAttrs(
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }
    case Intrinsic::smin:
    case Intrinsic::smax:
    case Intrinsic::umin:
    case Intrinsic::umax: {
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;

      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
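      // The integer min/max expansion priced above is, e.g. for smax(a, b):
      //   %c = icmp sgt %a, %b ; select %c, %a, %b
      // i.e. one compare plus one select.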
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost +=
          2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
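      // Signed overflow check being priced:
      //   add: Overflow = (Result < LHS) ^ (RHS < 0)
      //   sub: Overflow = (Result < LHS) ^ (RHS > 0)
      // i.e. the add/sub itself, two compares and an xor.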
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
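      // Unsigned overflow needs only a single compare against an input:
      //   uadd: Overflow = Result < LHS    usub: Overflow = LHS < RHS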
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      bool IsSigned = IID == Intrinsic::smul_with_overflow;
      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;

      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
      Cost += thisT()->getArithmeticInstrCost(
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::AShr, MulTy, CostKind,
      Cost += thisT()->getCmpSelInstrCost(
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;

      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
      Cost += thisT()->getArithmeticInstrCost(
      Cost += thisT()->getArithmeticInstrCost(
      Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
    case Intrinsic::abs: {
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Sub, RetTy, CostKind,
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      InstructionCost Cost =
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
      Cost += thisT()->getArithmeticInstrCost(
          isPowerOf2_32(BitWidth) ? BinaryOperator::And
                                  : BinaryOperator::URem,
          RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});

      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;

      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
      Cost += thisT()->getCmpSelInstrCost(
      Cost += thisT()->getCmpSelInstrCost(
    case Intrinsic::ucmp:
    case Intrinsic::scmp: {
      Type *CmpTy = Tys[0];
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,

        Cost += 2 * thisT()->getCmpSelInstrCost(
                        BinaryOperator::Select, RetTy, CondTy,

        Cost +=
            2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
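      // Three-way compare expansion priced above: two icmps (lt and gt),
      // then either two selects producing -1/0/+1 or, on targets preferring
      // boolean arithmetic, zext both flags and subtract: (a > b) - (a < b).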
    case Intrinsic::maximumnum:
    case Intrinsic::minimumnum: {
          IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;

      InstructionCost FCanonicalizeCost =
          thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
      return LT.first + FCanonicalizeCost * 2;
    if (!SkipScalarizationCost) {
      ScalarizationCost = 0;
      for (Type *RetVTy : RetVTys) {

    for (Type *Ty : Tys) {
      if (Ty->isVectorTy())
        Ty = Ty->getScalarType();

        thisT()->getIntrinsicInstrCost(Attrs, CostKind);
    for (Type *Ty : Tys) {
      ScalarCalls = std::max(ScalarCalls,
    }
    return ScalarCalls * ScalarCost + ScalarizationCost;

    return SingleCallCost;
    unsigned Id = MICA.getID();

    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
                            ? Instruction::Load
                            : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
    }
    case Intrinsic::masked_scatter:
    case Intrinsic::masked_gather:
    case Intrinsic::vp_scatter:
    case Intrinsic::vp_gather: {
      unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
                         MICA.getID() == Intrinsic::vp_gather)
                            ? Instruction::Load
                            : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::masked_load:
    case Intrinsic::masked_store: {
      unsigned Opcode =
          Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
    }
    case Intrinsic::masked_compressstore:
    case Intrinsic::masked_expandload: {
      unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
                            ? Instruction::Load
                            : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
    }
    case Intrinsic::vp_load_ff:

    if (!LT.first.isValid())
      return 0;

        Tp && LT.second.isFixedLengthVector() &&
        return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
    return LT.first.getValue();
    Type *ScalarTy = Ty->getElementType();

    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,

    unsigned NumReduxLevels = Log2_32(NumVecElts);
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      ShuffleCost += thisT()->getShuffleCost(
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
    }
    NumReduxLevels -= LongVectorCount;

    ArithCost +=
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,

    return ExtractCost + ArithCost;
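  // The tree reduction above walks log2(NumVecElts) levels, each level
  // halving the vector with a shuffle and combining the halves with the
  // reduction opcode; e.g. reducing <8 x float> with fadd costs 3
  // shuffle+fadd pairs plus the final extract of lane 0. The ordered
  // (strict) variant instead pays one scalar op per lane, giving the
  // ExtractCost + ArithCost sum above.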
                                         std::optional<FastMathFlags> FMF,
    assert(Ty && "Unknown reduction vector type");

    Type *ScalarTy = Ty->getElementType();

    unsigned NumReduxLevels = Log2_32(NumVecElts);
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      ShuffleCost += thisT()->getShuffleCost(
    }
    NumReduxLevels -= LongVectorCount;

    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
      VectorType *Ty, std::optional<FastMathFlags> FMF,

    if (FTy && IsUnsigned && Opcode == Instruction::Add &&
      return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
             thisT()->getIntrinsicInstrCost(ICA, CostKind);

    InstructionCost RedCost =
        thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
    return RedCost + ExtCost;
    assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
           "The reduction opcode is expected to be Add or Sub.");
    InstructionCost RedCost = thisT()->getArithmeticReductionCost(
        RedOpcode, ExtTy, std::nullopt, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
    InstructionCost MulCost =
        thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
    return RedCost + MulCost + 2 * ExtCost;
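  // Mul-accumulate (dot-product style) reduction, e.g.
  // reduce.add(mul(zext(A), zext(B))), is priced above as the reduction in
  // the extended type plus one multiply in that type plus the two extends.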
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static const Function * getCalledFunction(const Value *V)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static unsigned getNumElements(Type *Ty)
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
InstructionCost getFPOpCost(Type *Ty) const override
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const override
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool shouldBuildLookupTables() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override
Estimate the overhead of scalarizing an instruction.
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isProfitableToHoist(Instruction *I) const override
unsigned getNumberOfParts(Type *Tp) const override
unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
bool useAA() const override
unsigned getPrefetchDistance() const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const override
bool isLegalAddScalableImmediate(int64_t Imm) const override
unsigned getAssumedAddrSpace(const Value *V) const override
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override
bool haveFastSqrt(Type *Ty) const override
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const override
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override
unsigned adjustInliningThreshold(const CallBase *CB) const override
unsigned getInliningThresholdMultiplier() const override
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
bool shouldBuildRelLookupTables() const override
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
unsigned getEpilogueVectorizationMinVF() const override
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
InstructionCost getVectorSplitCost() const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
std::optional< unsigned > getMaxVScale() const override
unsigned getFlatAddressSpace() const override
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Compute a cost of the given call instruction.
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
~BasicTTIImplBase() override=default
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
unsigned getMaxPrefetchIterationsAhead() const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Get intrinsic cost based on argument types.
bool hasBranchDivergence(const Function *F=nullptr) const override
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override
bool shouldPrefetchAddressSpace(unsigned AS) const override
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override
unsigned getCacheLineSize() const override
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override
bool shouldDropLSRSolutionIfLessProfitable() const override
int getInlinerVectorBonusPercent() const override
bool isVScaleKnownToBeAPowerOfTwo() const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
bool isLegalAddImmediate(int64_t imm) const override
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
bool isSingleThreaded() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
bool isProfitableLSRChainElement(Instruction *I) const override
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const override
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const override
std::optional< unsigned > getVScaleForTuning() const override
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const override
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override
bool isSourceOfDivergence(const Value *V) const override
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const override
bool isAlwaysUniform(const Value *V) const override
bool isLegalICmpImmediate(int64_t imm) const override
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override
unsigned getRegUsageForType(Type *Ty) const override
InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override
Get memory intrinsic cost based on arguments.
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
bool isTypeLegal(Type *Ty) const override
bool enableWritePrefetching() const override
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override
InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Estimate the overhead of scalarizing an instruction's operands.
bool isNumRegsMajorCostOfLSR() const override
BasicTTIImpl(const TargetMachine *TM, const Function &F)
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)
static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)
This class represents a range of values.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
AttributeList getAttributes() const
Return the attribute list for this Function.
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const TargetLibraryInfo * getLibInfo() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
const FeatureBitset & getFeatureBits() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Information for memory intrinsic cost model.
Align getAlignment() const
Type * getDataType() const
bool getVariableMask() const
Intrinsic::ID getID() const
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumCases cases covering a value range of Range.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests unique destinations, with Low and High as its lowest and highest case values.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we are happy to fold it away.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal'), we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
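A hedged sketch of the legality-query pattern these entries support, and the one this header uses throughout: map the IR opcode to its ISD node, convert the IR type to an EVT, then ask the lowering object how the target handles the pair. TLI and DL are assumed to come from the surrounding target machinery; mulIsCheap is an illustrative name.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

bool mulIsCheap(const TargetLoweringBase &TLI, const DataLayout &DL,
                Type *Ty) {
  int ISDOpc = TLI.InstructionOpcodeToISD(Instruction::Mul);
  EVT VT = TLI.getValueType(DL, Ty);
  // Legal and custom-lowered operations are usually modeled as cheap;
  // anything the target would expand is not.
  return VT.isSimple() && TLI.isOperationLegalOrCustom(ISDOpc, VT);
}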
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInfo-derived member variable.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetSubtargetInfo - Generic base class for all target subtargets.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isVoidTy() const
Return true if this is 'void'.
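A minimal sketch combining the Type queries above: widen integer lanes while keeping the lane count, the kind of rewrite getWithNewBitWidth enables. widenLanesTo32 is an illustrative helper, not an LLVM API.

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

Type *widenLanesTo32(Type *Ty) {
  if (!Ty->isVectorTy() || !Ty->getScalarType()->isIntegerTy() ||
      Ty->getScalarSizeInBits() >= 32)
    return Ty;
  return Ty->getWithNewBitWidth(32); // e.g. <4 x i8> -> <4 x i32>
}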
Value * getOperand(unsigned i) const
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
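A hedged sketch of the VPIntrinsic queries above: recover the plain IR opcode that a vector-predicated intrinsic corresponds to, falling back to 0 when there is none. The wrapper name is illustrative.

#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

unsigned functionalOpcodeOrZero(Intrinsic::ID ID) {
  if (!VPIntrinsic::isVPIntrinsic(ID))
    return 0;
  // E.g. Intrinsic::vp_add maps back to Instruction::Add.
  return VPIntrinsic::getFunctionalOpcodeForVP(ID).value_or(0);
}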
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same element type.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
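A minimal sketch of the VectorType factories above: build <8 x float> and derive the half-width type used when a shuffle or operation is split. The function name is illustrative.

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

VectorType *halfWidthV8F32(LLVMContext &Ctx) {
  auto *V8F32 =
      VectorType::get(Type::getFloatTy(Ctx), ElementCount::getFixed(8));
  return VectorType::getHalfElementsVectorType(V8F32); // <4 x float>
}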
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types and value types.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same width and scale.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ SSUBO
Same for subtraction.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer of the type given in operand 1, saturating values that cannot be represented.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT VT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
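A hedged sketch of the libcall lookups above, mirroring how this header decides whether a multi-result FP intrinsic can become a library call. The include path is an assumption: these helpers have moved between RuntimeLibcalls.h and RuntimeLibcallUtil.h across LLVM versions.

#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/ValueTypes.h"

using namespace llvm;

bool hasSincosLibcall(EVT RetVT) {
  return RTLIB::getSINCOS(RetVT) != RTLIB::UNKNOWN_LIBCALL;
}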
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
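A small sketch of the range helpers above, echoing how isSplatMask earlier in this header walks a mask with enumerate(). isIdentityMask is an illustrative helper, not an LLVM API.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

bool isIdentityMask(ArrayRef<int> Mask) {
  // enumerate() pairs each element with its 0-based index; all_of folds the
  // per-lane checks into a single predicate over the range.
  return all_of(enumerate(Mask), [](const auto &P) {
    return P.value() == static_cast<int>(P.index());
  });
}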
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
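A minimal sketch of the casting helpers above: dyn_cast yields null on a type mismatch, so a single if both tests and converts. The helper name is illustrative.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

unsigned fixedElementCountOrZero(Type *Ty) {
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return FVTy->getNumElements();
  return 0; // scalable vectors and scalars fall through here
}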
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same element count.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
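A tiny sketch of the two bit-math helpers above; ceilLog2 is an illustrative name and assumes N > 0 (Log2_32 returns -1 for zero, which wraps as unsigned).

#include "llvm/Support/MathExtras.h"

using namespace llvm;

unsigned ceilLog2(uint32_t N) {
  // Exact for powers of two; round up by one bit position otherwise.
  return isPowerOf2_32(N) ? Log2_32(N) : Log2_32(N) + 1;
}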
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching element count.
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
ElementCount getVectorElementCount() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
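A hedged sketch of the IR-type/EVT round trip these entries describe, assuming Ty is a first-class type that EVT::getEVT can handle. The function name is illustrative.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;

Type *canonicalTypeFor(LLVMContext &Ctx, Type *Ty) {
  EVT VT = EVT::getEVT(Ty); // IR type -> (possibly extended) EVT
  if (!VT.isSimple())
    return Ty;              // extended EVTs have no single MVT
  return VT.getTypeForEVT(Ctx); // EVT -> canonical IR type
}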
Attributes of a target dependent hardware loop.
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.