NVPTXTargetTransformInfo.h (LLVM 20.0.0git)
//===-- NVPTXTargetTransformInfo.h - NVPTX specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file describes a TargetTransformInfo::Concept conforming object
/// specific to the NVPTX target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target-independent and default TTI implementations handle the
/// rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H

#include "NVPTXTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <optional>

namespace llvm {

class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
  typedef BasicTTIImplBase<NVPTXTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const NVPTXSubtarget *ST;
  const NVPTXTargetLowering *TLI;

  const NVPTXSubtarget *getST() const { return ST; };
  const NVPTXTargetLowering *getTLI() const { return TLI; };

public:
  explicit NVPTXTTIImpl(const NVPTXTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl()),
        TLI(ST->getTargetLowering()) {}

  bool hasBranchDivergence(const Function *F = nullptr) { return true; }

  bool isSourceOfDivergence(const Value *V);

  unsigned getFlatAddressSpace() const {
    return AddressSpace::ADDRESS_SPACE_GENERIC;
  }

  // Globals in the shared, local and param address spaces cannot have
  // non-undef initializers.
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const {
    return AS != AddressSpace::ADDRESS_SPACE_SHARED &&
           AS != AddressSpace::ADDRESS_SPACE_LOCAL && AS != ADDRESS_SPACE_PARAM;
  }
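  // Illustrative example (added for this listing, not in the original
  // header): a global in the global address space may keep its initializer,
  // while a shared-memory global may not.
  //   canHaveNonUndefGlobalInitializerInAddressSpace(ADDRESS_SPACE_GLOBAL) == true
  //   canHaveNonUndefGlobalInitializerInAddressSpace(ADDRESS_SPACE_SHARED) == false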

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;

  // Loads and stores can be vectorized if the alignment is at least as big as
  // the load/store we want to vectorize.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const {
    return Alignment >= ChainSizeInBytes;
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const {
    return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
  }
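  // Illustrative example (added for this listing, not in the original
  // header): a 16-byte chain such as <4 x float> passes the check only when
  // the access is at least 16-byte aligned.
  //   isLegalToVectorizeLoadChain(/*ChainSizeInBytes=*/16, Align(16), AS) == true
  //   isLegalToVectorizeLoadChain(/*ChainSizeInBytes=*/16, Align(8), AS)  == false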

  // NVPTX has infinite registers of all kinds, but the actual machine doesn't.
  // We conservatively return 1 here which is just enough to enable the
  // vectorizers but disables heuristics based on the number of registers.
  // FIXME: Return a more reasonable number, while keeping an eye on
  // LoopVectorizer's unrolling heuristics.
  unsigned getNumberOfRegisters(bool Vector) const { return 1; }

  // Only <2 x half> should be vectorized, so always return 32 for the vector
  // register size.
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(32);
  }
  unsigned getMinVectorRegisterBitWidth() const { return 32; }
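  // Illustrative example (added for this listing, not in the original
  // header): the reported width is a fixed 32 bits for every register kind,
  // which is exactly wide enough for a <2 x half> vector and nothing larger.
  //   getRegisterBitWidth(TTI::RGK_FixedWidthVector) == TypeSize::getFixed(32)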

  // We don't want to prevent inlining because of target-cpu and -features
  // attributes that were added to newer versions of LLVM/Clang: there are
  // no incompatible functions in PTX; ptxas will throw errors in such cases.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return true;
  }

  // Increase the inlining cost threshold by a factor of 11, reflecting that
  // calls are particularly expensive in NVPTX.
  unsigned getInliningThresholdMultiplier() const { return 11; }

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) {
    // Volatile loads/stores are only supported for shared and global address
    // spaces, or for generic AS that maps to them.
    if (!(AddrSpace == llvm::ADDRESS_SPACE_GENERIC ||
          AddrSpace == llvm::ADDRESS_SPACE_GLOBAL ||
          AddrSpace == llvm::ADDRESS_SPACE_SHARED))
      return false;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::Load:
    case Instruction::Store:
      return true;
    }
  }
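  // Illustrative example (added for this listing, not in the original
  // header; SomeLoadInst and SomeAtomicRMW stand for arbitrary instructions
  // of those kinds): plain loads/stores to the global address space have a
  // volatile form, while param-space accesses and non-load/store
  // instructions do not.
  //   hasVolatileVariant(SomeLoadInst, ADDRESS_SPACE_GLOBAL) == true
  //   hasVolatileVariant(SomeLoadInst, ADDRESS_SPACE_PARAM)  == false
  //   hasVolatileVariant(SomeAtomicRMW, ADDRESS_SPACE_GLOBAL) == false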

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;
};

} // end namespace llvm

#endif
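
A minimal usage sketch (not part of the header; prefersWideLoads and its surrounding pass are hypothetical names used only for illustration): IR-level passes do not construct NVPTXTTIImpl directly. They request the generic TargetTransformInfo analysis, which the NVPTX target machine backs with this class, so queries like the ones below are answered by the hooks declared above.

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Support/Alignment.h"

  using namespace llvm;

  // Hypothetical helper inside an IR pass. On an NVPTX module the TTI result
  // forwards to NVPTXTTIImpl, so hasBranchDivergence() is always true and the
  // load-chain query reduces to the alignment comparison defined above.
  static bool prefersWideLoads(Function &F, FunctionAnalysisManager &FAM) {
    TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    return TTI.hasBranchDivergence(&F) &&
           TTI.isLegalToVectorizeLoadChain(/*ChainSizeInBytes=*/16, Align(16),
                                           /*AddrSpace=*/0); // 0 = generic AS
  }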