16#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H 
   17#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H 
   48  unsigned getEstimatedVLFor(
VectorType *Ty) 
const;
 
   74        TLI(ST->getTargetLowering()) {}
 
 
  104      unsigned Opcode, 
Type *InputTypeA, 
Type *InputTypeB, 
Type *AccumType,
 
  111    return ST->hasVInstructions();
 
 
  115    return ST->hasVInstructions();
 
 
  118    return ST->hasVInstructions();
 
 
  133  unsigned getMaximumVF(
unsigned ElemWidth, 
unsigned Opcode) 
const override;
 
  167    return ST->useRVVForFixedLengthVectors() ? 16 : 0;
 
 
  174                 const Instruction *CxtI = 
nullptr) 
const override;
 
  177      VectorType *Ty, 
const APInt &DemandedElts, 
bool Insert, 
bool Extract,
 
  190      unsigned Opcode, 
Type *VecTy, 
unsigned Factor, ArrayRef<unsigned> Indices,
 
  192      bool UseMaskForCond = 
false, 
bool UseMaskForGaps = 
false) 
const override;
 
  195                                         const Value *
Ptr, 
bool VariableMask,
 
  198                                         const Instruction *
I) 
const override;
 
  203                                const Instruction *
I = 
nullptr) 
const override;
 
  206                                         const Value *
Ptr, 
bool VariableMask,
 
  209                                         const Instruction *
I) 
const override;
 
  217                   const Instruction *
I = 
nullptr) 
const override;
 
  225                             std::optional<FastMathFlags> FMF,
 
  230                           VectorType *ValTy, std::optional<FastMathFlags> FMF,
 
  247                                 const Instruction *
I = 
nullptr) 
const override;
 
  252                                     unsigned Index, 
const Value *Op0,
 
  253                                     const Value *Op1) 
const override;
 
  258                                   unsigned Index) 
const override;
 
  268    return TLI->isLegalElementTypeForRVV(TLI->getValueType(
DL, Ty));
 
 
  272    if (!ST->hasVInstructions())
 
  275    EVT DataTypeVT = TLI->getValueType(
DL, DataType);
 
  282    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.
getStoreSize())
 
  285    return TLI->isLegalElementTypeForRVV(ElemType);
 
 
  289                         unsigned )
 const override {
 
 
  293                          unsigned )
 const override {
 
 
  298    if (!ST->hasVInstructions())
 
  301    EVT DataTypeVT = TLI->getValueType(
DL, DataType);
 
  308    EVT PointerTypeVT = 
EVT(TLI->getPointerTy(
DL));
 
  310        !TLI->isLegalElementTypeForRVV(PointerTypeVT))
 
  314    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.
getStoreSize())
 
  317    return TLI->isLegalElementTypeForRVV(ElemType);
 
 
  328                                  Align Alignment)
 const override {
 
  330    return ST->is64Bit() && !ST->hasVInstructionsI64();
 
 
  334                                   Align Alignment)
 const override {
 
  336    return ST->is64Bit() && !ST->hasVInstructionsI64();
 
 
  340    EVT DataTypeVT = TLI->getValueType(
DL, DataType);
 
  341    return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
 
 
  346                                    unsigned AddrSpace)
 const override {
 
  347    return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
 
 
  356    return TLI->isVScaleKnownToBeAPowerOfTwo();
 
 
  364    if (!ST->hasVInstructions() ||
 
  368                 ->getIntegerBitWidth() != 1))
 
 
  379    if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(
DL, Ty)))
 
  401      if (Ty->isBFloatTy() || (Ty->isHalfTy() && !ST->hasVInstructionsF16()))
 
 
  415    return VF.
isScalar() ? 1 : ST->getMaxInterleaveFactor();
 
 
  421    return ST->hasVInstructions();
 
 
  434      if (ST->hasStdExtF())
 
  442      return ST->hasVInstructions() ? 32 : 0;
 
 
  451                                   Type *Ty = 
nullptr)
 const override {
 
  458    if ((ScalarTy->
isHalfTy() && ST->hasStdExtZfhmin()) ||
 
  459        (ScalarTy->
isFloatTy() && ST->hasStdExtF()) ||
 
  460        (ScalarTy->
isDoubleTy() && ST->hasStdExtD())) {
 
 
  470      return "RISCV::GPRRC";
 
  472      return "RISCV::FPRRC";
 
  474      return "RISCV::VRRC";
 
 
  484      bool &AllowPromotionWithoutCommonHeader) 
const override;
 
 
This file provides a helper that implements much of the TTI interface in terms of the target-independent code generator and TargetLowering interfaces.
 
Analysis containing CSE Info
 
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
 
TargetTransformInfo::VPLegalization VPLegalization
 
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
 
uint64_t IntrinsicInst * II
 
Class for arbitrary precision integers.
 
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
 
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
 
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
 
Value * getArgOperand(unsigned i) const
 
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
 
constexpr bool isScalar() const
Exactly one element.
 
Class to represent fixed width SIMD vectors.
 
A wrapper class for inspecting calls to intrinsic functions.
 
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
 
Represents a single loop in the control flow graph.
 
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
 
bool supportsScalableVectors() const override
 
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
 
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
 
bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const override
 
bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const
 
InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const override
 
unsigned getMinTripCountTailFoldingThreshold() const override
 
unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const override
 
TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const override
 
bool preferEpilogueVectorization() const override
 
InstructionCost getAddressComputationCost(Type *PTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const override
 
InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo, TTI::TargetCostKind CostKind) const
Return the cost of materializing an immediate for a value operand of a store instruction.
 
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const override
 
bool isElementTypeLegalForScalableVector(Type *Ty) const override
 
bool enableMaskedInterleavedAccessVectorization() const override
 
bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const override
 
std::optional< unsigned > getMinPageSize() const override
 
InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const override
 
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override
 
bool hasActiveVectorLength() const override
 
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
 
bool shouldConsiderVectorizationRegPressure() const override
 
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
 
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
 
InstructionCost getExpandCompressMemoryOpCost(unsigned Opcode, Type *Src, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
 
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
 
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) const override
 
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const override
 
bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) const override
 
const char * getRegisterClassName(unsigned ClassID) const override
 
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const override
 
bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned) const override
 
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
 
bool canSplatOperand(Instruction *I, int Operand) const
Return true if the (vector) instruction I will be lowered to an instruction with a scalar splat operand.
 
bool enableInterleavedAccessVectorization() const override
 
bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const
 
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const override
 
bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const override
 
unsigned getRegUsageForType(Type *Ty) const override
 
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
 
InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override
Estimate the overhead of scalarizing an instruction.
 
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const override
 
unsigned getMinVectorRegisterBitWidth() const override
 
bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override
 
bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment) const override
 
unsigned getMaxInterleaveFactor(ElementCount VF) const override
 
bool enableOrderedReductions() const override
 
RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
 
TargetTransformInfo::VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const override
 
bool preferAlternateOpcodeVectorization() const override
 
bool isVScaleKnownToBeAPowerOfTwo() const override
 
bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded into a target instruction during instruction selection.
 
unsigned getNumberOfRegisters(unsigned ClassID) const override
 
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
 
std::optional< unsigned > getMaxVScale() const override
 
bool shouldExpandReduction(const IntrinsicInst *II) const override
 
std::optional< unsigned > getVScaleForTuning() const override
 
bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned) const override
 
bool isLegalMaskedGather(Type *DataType, Align Alignment) const override
 
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
 
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override
 
InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind) const override
 
TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override
 
InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, TTI::PartialReductionExtendKind OpAExtend, TTI::PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind) const override
 
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpdInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
 
bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const override
 
bool enableScalableVectorization() const override
 
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
 
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
 
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
 
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
 
bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const override
See if I should be considered for address type promotion.
 
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const override
 
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const override
 
bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) const override
 
TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override
 
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
 
Type * getRecurrenceType() const
Returns the type of the recurrence.
 
RecurKind getRecurrenceKind() const
 
The main scalar evolution driver.
 
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
 
The instances of the Type class are immutable: once they are created, they are never changed.
 
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
 
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
 
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
 
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
 
This is the common base class for vector predication intrinsics.
 
LLVM Value Representation.
 
Type * getType() const
All values are typed, get the type of this value.
 
Base class of all SIMD vector types.
 
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
 
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
 
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
 
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
 
This is an optimization pass for GlobalISel generic memory operations.
 
FunctionAddr VTableAddr Value
 
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
 
@ Or
Bitwise or logical OR of integers.
 
@ AnyOf
AnyOf reduction with select(cmp(),x,y) where one of (x,y) is loop invariant, and both x and y are integer type.
 
@ Xor
Bitwise or logical XOR of integers.
 
@ FMax
FP max implemented in terms of select(cmp()).
 
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
 
@ SMax
Signed integer max implemented in terms of select(cmp()).
 
@ And
Bitwise or logical AND of integers.
 
@ SMin
Signed integer min implemented in terms of select(cmp()).
 
@ FMin
FP min implemented in terms of select(cmp()).
 
@ Sub
Subtraction of integers.
 
@ AddChainWithSubs
A chain of adds and subs.
 
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
 
ArrayRef(const T &OneElt) -> ArrayRef< T >
 
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
 
@ None
Don't use tail folding.
 
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
 
This struct is a compact representation of a valid (non-zero power of two) alignment.
 
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
 
bool isFixedLengthVector() const
 
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
 
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
 
Information about a load/store intrinsic defined by the target.