X86TargetTransformInfo.h
//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a TargetTransformInfoImplBase conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include <optional>

namespace llvm {

class InstCombiner;

class X86TTIImpl final : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }
  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::FeatureX86_64,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCX16,
      X86::FeatureLAHFSAHF64,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,
      X86::TuningFastDPWSSD,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };

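  // Note (sketch, not upstream documentation): this list is consumed by
  // areInlineCompatible(), whose definition lives in
  // X86TargetTransformInfo.cpp. Roughly, the caller's and callee's subtarget
  // feature bits are masked with ~InlineFeatureIgnoreList, and the callee's
  // remaining bits must be a subset of the caller's for inlining to be
  // considered feature-compatible.
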
public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  std::optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const override;
  unsigned getRegisterClassForType(bool Vector, Type *Ty) const override;
  bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const override;
  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const override;
  unsigned getMaxInterleaveFactor(ElementCount VF) const override;
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
                 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                 const Instruction *CxtI = nullptr) const override;
  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                   const Instruction *I = nullptr) const override;
  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override;
  /// Estimate the overhead of scalarizing an instruction.
  InstructionCost getScalarizationOverhead(
      VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
      TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
      ArrayRef<Value *> VL = {}) const override;
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) const override;
  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;
  /// Get memory intrinsic cost based on arguments.
  InstructionCost
  getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
                           TTI::TargetCostKind CostKind) const override;
  InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                                        TTI::TargetCostKind CostKind) const;
  /// Calculate the cost of Gather / Scatter operation.
  InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA,
                                         TTI::TargetCostKind CostKind) const;
  InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const TTI::PointersChainInfo &Info, Type *AccessTy,
                       TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
                            TTI::TargetCostKind CostKind) const override;

  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const override;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override;

  unsigned getAtomicMemIntrinsicMaxElementSize() const override;

  /// Get intrinsic cost based on arguments.
  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;

  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF) const;

  /// Try to calculate op costs for min/max reduction operations.
  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false) const;

  /// Calculate the cost of materializing a 64-bit value.
  InstructionCost getIntImmCost(int64_t) const;

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind) const override;

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const override;
  InstructionCost
  getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                      Type *Ty, TTI::TargetCostKind CostKind) const override;
  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns an invalid cost.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const override;
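  // Sketch of what this hook prices: an x86 memory operand has the form
  // Base + Index * Scale + Displacement with Scale in {1, 2, 4, 8}, and
  // callers such as LoopStrengthReduce use this cost to choose between
  // otherwise-equivalent address formulas.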

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const override;
  bool canMacroFuseCmp() const override;
  bool
  isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace,
                    TTI::MaskKind MaskKind =
                        TTI::MaskKind::VariableOrConstantMask) const override;
  bool
  isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace,
                     TTI::MaskKind MaskKind =
                         TTI::MaskKind::VariableOrConstantMask) const override;
  bool isLegalNTLoad(Type *DataType, Align Alignment) const override;
  bool isLegalNTStore(Type *DataType, Align Alignment) const override;
  bool isLegalBroadcastLoad(Type *ElementTy,
                            ElementCount NumElements) const override;
  bool forceScalarizeMaskedGather(VectorType *VTy,
                                  Align Alignment) const override;
  bool forceScalarizeMaskedScatter(VectorType *VTy,
                                   Align Alignment) const override {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const;
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedCompressStore(Type *DataType,
                                  Align Alignment) const override;
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const override;
  bool hasDivRemOp(Type *DataType, bool IsSigned) const override;
  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const override;
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override;
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             ArrayRef<Type *> Type) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return ST->getMaxInlineSizeThreshold();
  }

  TTI::MemCmpExpansionOptions
  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override;
  bool preferAlternateOpcodeVectorization() const override { return false; }
  bool prefersVectorizedAddressing() const override;
  bool supportsEfficientVectorElementLoadStore() const override;
  bool enableInterleavedAccessVectorization() const override;

  InstructionCost getBranchMispredictPenalty() const override;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;

  bool isVectorShiftByScalarCheap(Type *Ty) const override;

  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override;

  bool useFastCCForInternalCall(Function &F) const override;

private:
  bool supportsGather() const;
  InstructionCost getGSVectorCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  Type *DataTy, const Value *Ptr,
                                  Align Alignment, unsigned AddressSpace) const;

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif
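
For orientation, here is a minimal sketch (not part of the header) of how an IR-level pass reaches the overrides declared above: it asks the pass manager for TargetIRAnalysis, which hands back a TargetTransformInfo that forwards to X86TTIImpl when the module targets x86. The helper name costOf and the FAM/F/I parameters are placeholders assumed to come from a surrounding new-pass-manager pass.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Sketch only: query the cost of a single instruction through the TTI facade.
// On an x86 target this dispatches to X86TTIImpl overrides such as
// getArithmeticInstrCost or getMemoryOpCost.
static InstructionCost costOf(FunctionAnalysisManager &FAM, Function &F,
                              Instruction &I) {
  const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  return TTI.getInstructionCost(&I, TargetTransformInfo::TCK_RecipThroughput);
}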