#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
// ...
  NVPTXTTIImpl(const NVPTXTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl()),
        TLI(ST->getTargetLowering()) {}
// ...
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const {
    return Alignment >= ChainSizeInBytes;
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const {
    return Alignment >= ChainSizeInBytes;
  }
// ...
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = std::nullopt,
      const Instruction *CxtI = nullptr);
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);
// ... (inside hasVolatileVariant(Instruction *I, unsigned AddrSpace))
    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::Load:
    case Instruction::Store:
      return true;
    }
Other NVPTXTTIImpl members declared in this header and cross-referenced on this page:

  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
  bool areInlineCompatible(const Function *Caller, const Function *Callee) const
  bool hasBranchDivergence(const Function *F = nullptr)
  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
  unsigned getMinVectorRegisterBitWidth() const
  unsigned getFlatAddressSpace() const
  unsigned getNumberOfRegisters(bool Vector) const
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
  unsigned getInliningThresholdMultiplier() const
  bool isSourceOfDivergence(const Value *V)
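
For orientation, a minimal sketch of how a transform could consult these hooks through the generic TargetTransformInfo interface. The pass name QueryNVPTXTTIPass and the new-pass-manager boilerplate are illustrative assumptions, not part of this header; only the TTI calls correspond to the hooks declared above.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical pass used only to illustrate the queries; not part of LLVM.
struct QueryNVPTXTTIPass : PassInfoMixin<QueryNVPTXTTIPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);

    // With the NVPTX implementation shown above, a load chain is legal to
    // vectorize only when Alignment >= ChainSizeInBytes, so a 16-byte chain
    // needs at least 16-byte alignment.
    bool OK16 = TTI.isLegalToVectorizeLoadChain(16, Align(16), /*AddrSpace=*/0);
    bool OK4 = TTI.isLegalToVectorizeLoadChain(16, Align(4), /*AddrSpace=*/0);
    (void)OK16; // true on NVPTX
    (void)OK4;  // false on NVPTX

    // hasVolatileVariant answers true only for loads and stores (in the
    // address spaces NVPTX supports for volatile access).
    for (Instruction &I : instructions(F))
      (void)TTI.hasVolatileVariant(&I, /*AddrSpace=*/0);

    return PreservedAnalyses::all();
  }
};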
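
In the same spirit, a hedged sketch of querying the arithmetic-cost hook declared above. The helper name mulCostSketch and the choice of a scalar i32 multiply are assumptions for illustration; the TargetTransformInfo is expected to come from a function compiled for an NVPTX target, e.g. obtained as in the previous sketch.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Illustrative helper: asks the target how expensive a scalar i32 multiply is
// under the reciprocal-throughput cost model (the default cost kind).
static InstructionCost mulCostSketch(const TargetTransformInfo &TTI,
                                     LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  return TTI.getArithmeticInstrCost(Instruction::Mul, I32,
                                    TargetTransformInfo::TCK_RecipThroughput);
}

Latency or code-size estimates can be requested the same way by passing TargetTransformInfo::TCK_Latency or TargetTransformInfo::TCK_CodeSize as the cost kind.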