//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the RISC-V target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H

#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>

namespace llvm {

class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
  using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
  using TTI = TargetTransformInfo;

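  // BasicTTIImplBase is a CRTP base class; it calls back into this class
  // through getST()/getTLI(), hence the friend declaration below.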
  friend BaseT;

  const RISCVSubtarget *ST;
  const RISCVTargetLowering *TLI;

  const RISCVSubtarget *getST() const { return ST; }
  const RISCVTargetLowering *getTLI() const { return TLI; }

  /// This function returns an estimate for VL to be used in VL based terms
  /// of the cost model. For fixed length vectors, this is simply the
  /// vector length. For scalable vectors, we return results consistent
  /// with getVScaleForTuning under the assumption that clients are also
  /// using that when comparing costs between scalar and vector representation.
  /// This does unfortunately mean that we can both undershoot and overshoot
  /// the true cost significantly if getVScaleForTuning is wildly off for the
  /// actual target hardware.
  unsigned getEstimatedVLFor(VectorType *Ty);

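  /// Return the cost of a sequence of RISC-V instructions with the given
  /// opcodes, operating on vectors of type \p VT.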
  InstructionCost getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
                                          TTI::TargetCostKind CostKind);

  /// Return the cost of accessing a constant pool entry of the specified
  /// type.
  InstructionCost getConstantPoolLoadCost(Type *Ty,
                                          TTI::TargetCostKind CostKind);

public:
  explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// Return the cost of materializing an immediate for a value operand of
  /// a store instruction.
  InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
                                  TTI::TargetCostKind CostKind);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);

  /// \name EVL Support for predicated vectorization.
  /// Whether the target supports the %evl parameter of VP intrinsic efficiently
  /// in hardware, for the given opcode and type/alignment. (see LLVM Language
  /// Reference - "Vector Predication Intrinsics",
  /// https://llvm.org/docs/LangRef.html#vector-predication-intrinsics and
  /// "IR-level VP intrinsics",
  /// https://llvm.org/docs/Proposals/VectorPredication.html#ir-level-vp-intrinsics).
  /// \param Opcode the opcode of the instruction checked for predicated version
  /// support.
  /// \param DataType the type of the instruction with the \p Opcode checked for
  /// prediction support.
  /// \param Alignment the alignment for memory access operation checked for
  /// predicated version support.
  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                             Align Alignment) const;

  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  bool shouldExpandReduction(const IntrinsicInst *II) const;
  bool supportsScalableVectors() const { return ST->hasVInstructions(); }
  bool enableOrderedReductions() const { return true; }
  bool enableScalableVectorization() const { return ST->hasVInstructions(); }
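  // Data: use the predicate only to mask the data operations in the loop.
  // Without V instructions, fall back to the variant that avoids the
  // get.active.lane.mask intrinsic.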
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
    return ST->hasVInstructions() ? TailFoldingStyle::Data
                                  : TailFoldingStyle::DataWithoutLaneMask;
  }
  std::optional<unsigned> getMaxVScale() const;
  std::optional<unsigned> getVScaleForTuning() const;

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;

  unsigned getRegUsageForType(Type *Ty);

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;

  bool preferEpilogueVectorization() const {
    // Epilogue vectorization is usually unprofitable - tail folding or
    // a smaller VF would have been better. This is a blunt hammer - we
    // should re-examine this once vectorization is better tuned.
    return false;
  }

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

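  // A non-zero minimum width is only meaningful when RVV is enabled for
  // fixed-length vectors.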
  unsigned getMinVectorRegisterBitWidth() const {
    return ST->useRVVForFixedLengthVectors() ? 16 : 0;
  }

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = {},
                                 const Instruction *CxtI = nullptr);

  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind,
                                           ArrayRef<Value *> VL = {});

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

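  // RVV provides native indexed (vluxei*/vsuxei*) and strided (vlse*/vsse*)
  // loads and stores, which the gather/scatter and strided cost hooks below
  // are meant to model.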
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);

  InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);

  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);

  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *ValTy,
                                           FastMathFlags FMF,
                                           TTI::TargetCostKind CostKind);

  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);

  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);

  bool isElementTypeLegalForScalableVector(Type *Ty) const {
    return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty));
  }

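  /// Masked loads/stores require V instructions, an RVV-legal element type,
  /// and either element-aligned accesses or unaligned vector memory support.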
  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
    if (!ST->hasVInstructions())
      return false;

    EVT DataTypeVT = TLI->getValueType(DL, DataType);

    // Only support fixed vectors if we know the minimum vector size.
    if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
      return false;

    EVT ElemType = DataTypeVT.getScalarType();
    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
      return false;

    return TLI->isLegalElementTypeForRVV(ElemType);
  }

  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }

  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) {
    if (!ST->hasVInstructions())
      return false;

    EVT DataTypeVT = TLI->getValueType(DL, DataType);

    // Only support fixed vectors if we know the minimum vector size.
    if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
      return false;

    // We also need to check that the vector of addresses is valid.
    EVT PointerTypeVT = EVT(TLI->getPointerTy(DL));
    if (DataTypeVT.isScalableVector() &&
        !TLI->isLegalElementTypeForRVV(PointerTypeVT))
      return false;

    EVT ElemType = DataTypeVT.getScalarType();
    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
      return false;

    return TLI->isLegalElementTypeForRVV(ElemType);
  }

  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }

  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
    // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
    return ST->is64Bit() && !ST->hasVInstructionsI64();
  }

  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
    return ST->is64Bit() && !ST->hasVInstructionsI64();
  }

  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) {
    EVT DataTypeVT = TLI->getValueType(DL, DataType);
    return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
  }

  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                    Align Alignment, unsigned AddrSpace) {
    return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
                                             DL);
  }

  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);

  bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);

  bool isVScaleKnownToBeAPowerOfTwo() const {
    return TLI->isVScaleKnownToBeAPowerOfTwo();
  }

  /// \returns How the target needs this vector-predicated operation to be
  /// transformed.
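  /// Without V instructions, or for vp.reduce.mul on element types wider than
  /// i1, the EVL parameter is discarded and the operation is converted to a
  /// non-VP form; everything else is reported as legal as-is.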
  TargetTransformInfo::VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const {
    using VPLegalization = TargetTransformInfo::VPLegalization;
    if (!ST->hasVInstructions() ||
        (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
         cast<VectorType>(PI.getArgOperand(1)->getType())
                 ->getElementType()
                 ->getIntegerBitWidth() != 1))
      return VPLegalization(VPLegalization::Discard, VPLegalization::Convert);
    return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
  }

  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const {
    if (!VF.isScalable())
      return true;

    Type *Ty = RdxDesc.getRecurrenceType();
    if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)))
      return false;

    // We can't promote f16/bf16 fadd reductions and scalable vectors can't be
    // expanded.
    // TODO: Promote f16/bf16 fmin/fmax reductions
    if (Ty->isBFloatTy() || (Ty->isHalfTy() && !ST->hasVInstructionsF16()))
      return false;

    switch (RdxDesc.getRecurrenceKind()) {
    case RecurKind::Add:
    case RecurKind::FAdd:
    case RecurKind::And:
    case RecurKind::Or:
    case RecurKind::Xor:
    case RecurKind::SMin:
    case RecurKind::SMax:
    case RecurKind::UMin:
    case RecurKind::UMax:
    case RecurKind::FMin:
    case RecurKind::FMax:
    case RecurKind::FMulAdd:
    case RecurKind::IAnyOf:
    case RecurKind::FAnyOf:
      return true;
    default:
      return false;
    }
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) {
    // Don't interleave if the loop has been vectorized with scalable vectors.
    if (VF.isScalable())
      return 1;
    // If the loop will not be vectorized, don't interleave it either;
    // let the regular unroller handle it.
    return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
  }

  bool enableInterleavedAccessVectorization() { return true; }

  enum RISCVRegisterClass { GPRRC, FPRRC, VRRC };
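  // ClassID values passed to the register queries below are members of
  // RISCVRegisterClass.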
  unsigned getNumberOfRegisters(unsigned ClassID) const {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      // 31 = 32 GPR - x0 (zero register)
      // FIXME: Should we exclude fixed registers like SP, TP or GP?
      return 31;
    case RISCVRegisterClass::FPRRC:
      if (ST->hasStdExtF())
        return 32;
      return 0;
    case RISCVRegisterClass::VRRC:
      // Although there are 32 vector registers, v0 is special in that it is the
      // only register that can be used to hold a mask.
      // FIXME: Should we conservatively return 31 as the number of usable
      // vector registers?
      return ST->hasVInstructions() ? 32 : 0;
    }
    llvm_unreachable("unknown register class");
  }

  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
    if (Vector)
      return RISCVRegisterClass::VRRC;
    if (!Ty)
      return RISCVRegisterClass::GPRRC;

    Type *ScalarTy = Ty->getScalarType();
    if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhmin()) ||
        (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
        (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
      return RISCVRegisterClass::FPRRC;
    }

    return RISCVRegisterClass::GPRRC;
  }

  const char *getRegisterClassName(unsigned ClassID) const {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      return "RISCV::GPRRC";
    case RISCVRegisterClass::FPRRC:
      return "RISCV::FPRRC";
    case RISCVRegisterClass::VRRC:
      return "RISCV::VRRC";
    }
    llvm_unreachable("unknown register class");
  }

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2);

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader);
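  // RISC-V's Sv32/Sv39/Sv48 translation schemes all use a 4 KiB base page.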
  std::optional<unsigned> getMinPageSize() const { return 4096; }
  /// Return true if the (vector) instruction I will be lowered to an
  /// instruction with a scalar splat operand for the given Operand number.
  bool canSplatOperand(Instruction *I, int Operand) const;
  /// Return true if a vector instruction will lower to a target instruction
  /// able to splat the given operand.
  bool canSplatOperand(unsigned Opcode, int Operand) const;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const;

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H