HexagonTargetTransformInfo.h
This header declares HexagonTTIImpl, the Hexagon target's implementation of the TargetTransformInfo (TTI) cost-model interface, built on top of BasicTTIImplBase. Mid-end clients such as the vectorizers, the loop unroller, and LSR consult these hooks through the TargetTransformInfo facade.
The surviving fragments of the source listing reconstruct to the following skeleton; members elided here appear in the definition list below.

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONTARGETTRANSFORMINFO_H

class ScalarEvolution;

class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
  using BaseT = BasicTTIImplBase<HexagonTTIImpl>;
  using TTI = TargetTransformInfo;

  const HexagonSubtarget &ST;
  const HexagonTargetLowering &TLI;

  // Returns the number of elements of Ty if it is a vector type, 1 otherwise.
  unsigned getTypeNumElements(Type *Ty) const;

public:
  explicit HexagonTTIImpl(const HexagonTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()),
        ST(*TM->getSubtargetImpl(F)), TLI(*ST.getTargetLowering()) {}

  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract);

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);

  // ... remaining members: see the definition list below ...
};

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONTARGETTRANSFORMINFO_H
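For orientation, this is how the class above is typically wired into the pass pipeline. The actual hookup lives in HexagonTargetMachine.cpp, not in this header; the following is a minimal sketch assuming the standard TargetMachine::getTargetTransformInfo hook.

#include "HexagonTargetMachine.h"
#include "HexagonTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

// Each function gets its own HexagonTTIImpl so the cost model can observe
// per-function subtarget features; TargetTransformInfo type-erases the
// implementation for the mid-end clients.
TargetTransformInfo
HexagonTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(HexagonTTIImpl(this, F));
}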
Definitions of the members declared in this file, with signatures as they appear in the header:

General TTI hooks:
  HexagonTTIImpl(const HexagonTargetMachine *TM, const Function &F)
  InstructionCost getUserCost(const User *U, ArrayRef<const Value *> Operands, TTI::TargetCostKind CostKind)
  bool hasBranchDivergence()
  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
  TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
      Bias LSR towards creating post-increment opportunities (sketched below).
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
  bool shouldBuildLookupTables() const
  unsigned getCacheLineSize() const override
  unsigned getPrefetchDistance() const override
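The post-increment bias noted above is delivered through getPreferredAddressingMode. Its body lives in the corresponding .cpp file and is not part of this page; a minimal sketch, assuming the override unconditionally asks LSR for post-indexed formulae (the rationale in the comment is an assumption, not text from this page):

#include "HexagonTargetTransformInfo.h"

using namespace llvm;

TargetTransformInfo::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  // Assumption: Hexagon loads and stores can post-increment their base
  // register at no extra cost, so steering LSR toward post-increment
  // formulae removes an explicit address update from loop bodies.
  return TargetTransformInfo::AMK_PostIndexed;
}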
--- Vector TTI begin ---
  unsigned getNumberOfRegisters(bool vector) const
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
  unsigned getMinVectorRegisterBitWidth() const
  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
  unsigned getMaxInterleaveFactor(unsigned VF)
  bool shouldMaximizeVectorBandwidth() const
  bool supportsEfficientVectorElementLoadStore()
  bool enableAggressiveInterleaving(bool LoopHasReductions)
  bool enableInterleavedAccessVectorization()
  bool prefersVectorizedAddressing()
  bool isLegalMaskedStore(Type *DataType, Align Alignment)
  bool isLegalMaskedLoad(Type *DataType, Align Alignment)
  InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract)
  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args, ArrayRef<Type *> Tys)
  InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind)
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
  InstructionCost getAddressComputationCost(Type *Tp, ScalarEvolution *SE, const SCEV *S)
  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I = nullptr)
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
  InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond = false, bool UseMaskForGaps = false)
  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, ArrayRef<int> Mask, int Index, Type *SubTp, ArrayRef<const Value *> Args = None)
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I = nullptr)
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index)
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I = nullptr)
  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I = nullptr)
  InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue, TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue, TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None, TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None, ArrayRef<const Value *> Args = ArrayRef<const Value *>(), const Instruction *CxtI = nullptr)
--- Vector TTI end ---
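Passes never call HexagonTTIImpl directly; they query the TargetTransformInfo result of TargetIRAnalysis, which dispatches to the overrides above when the target is Hexagon. A sketch of a client under the new pass manager (the pass name and the 8 x i32 example are illustrative, not from this page):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Hypothetical probe pass: compares scalar vs. vector multiply cost the way
// a vectorizer-style client would.
struct CostProbePass : PassInfoMixin<CostProbePass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    TargetTransformInfo &TTInfo = AM.getResult<TargetIRAnalysis>(F);

    Type *I32 = Type::getInt32Ty(F.getContext());
    auto *V8I32 = FixedVectorType::get(I32, 8);

    // Reciprocal-throughput cost of one i32 multiply vs. one <8 x i32>
    // multiply, as modeled by the target's getArithmeticInstrCost.
    InstructionCost ScalarMul = TTInfo.getArithmeticInstrCost(
        Instruction::Mul, I32, TargetTransformInfo::TCK_RecipThroughput);
    InstructionCost VectorMul = TTInfo.getArithmeticInstrCost(
        Instruction::Mul, V8I32, TargetTransformInfo::TCK_RecipThroughput);

    if (VectorMul < ScalarMul * 8) {
      // Vector form is profitable; a real client would transform here.
    }
    return PreservedAnalyses::all();
  }
};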
Brief descriptions of LLVM types referenced above, taken from their own documentation:
  APInt: Class for arbitrary precision integers.
  Align: This struct is a compact representation of a valid (non-zero power of two) alignment.
  MaybeAlign: This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
  ArrayRef: Represents a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
  BasicTTIImplBase: Base class which can be used to help build a TTI implementation.
  CmpInst::Predicate: This enumeration lists the possible predicates for CmpInst subclasses.
  Loop: Represents a single loop in the control flow graph.
  ScalarEvolution: The main scalar evolution driver.
  SCEV: This class represents an analyzed expression in the program.
  Type: Instances of the Type class are immutable: once they are created, they are never changed.
  Value: LLVM Value representation.
  VectorType: Base class of all SIMD vector types.