16#ifndef LLVM_LIB_TARGET_VE_VETARGETTRANSFORMINFO_H
17#define LLVM_LIB_TARGET_VE_VETARGETTRANSFORMINFO_H
37 if (ElemTy.isIntegerTy()) {
38 unsigned ScaBits = ElemTy.getScalarSizeInBits();
39 return ScaBits == 1 || ScaBits == 32 || ScaBits == 64;
41 if (ElemTy.isPointerTy()) {
44 if (ElemTy.isFloatTy() || ElemTy.isDoubleTy()) {
62 bool enableVPU()
const {
return getST()->enableVPU(); }
// Expands to the case labels for both forms of a reduction intrinsic with the
// given SUFFIX: the vector-predicated (vp_reduce_*) variant and the plain
// (vector_reduce_*) variant, so a single switch entry covers both.
#define VEC_VP_CASE(SUFFIX) \
  case Intrinsic::vp_reduce_##SUFFIX: \
  case Intrinsic::vector_reduce_##SUFFIX:
69 switch (ReductionID) {
86 TLI(ST->getTargetLowering()) {}
89 bool VectorRegs = (ClassID == 1);
143 Type *DataType,
Align Alignment,
unsigned ,
158 return !isSupportedReduction(
II->getIntrinsicID());
162 unsigned Opcode,
Type *InputTypeA,
Type *InputTypeB,
Type *AccumType,
166 std::optional<unsigned> BinOp,
168 std::optional<FastMathFlags> FMF)
const override {
This file provides a helper that implements much of the TTI interface in terms of the target-independent code generator and TargetLowering interfaces.
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
TargetTransformInfo::VPLegalization VPLegalization
uint64_t IntrinsicInst * II
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
static InstructionCost getInvalid(CostType Val=0)
A wrapper class for inspecting calls to intrinsic functions.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isLegalMaskedGather(Type *DataType, Align Alignment) const override
bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned, TargetTransformInfo::MaskKind) const override
VETTIImpl(const VETargetMachine *TM, const Function &F)
bool shouldBuildRelLookupTables() const override
InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, TargetTransformInfo::PartialReductionExtendKind OpAExtend, TargetTransformInfo::PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TargetTransformInfo::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const override
bool shouldExpandReduction(const IntrinsicInst *II) const override
bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned, TargetTransformInfo::MaskKind) const override
unsigned getNumberOfRegisters(unsigned ClassID) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override
TargetTransformInfo::VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const override
unsigned getMinVectorRegisterBitWidth() const override
This is the common base class for vector predication intrinsics.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
This struct is a compact representation of a valid (non-zero power of two) alignment.