//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

cl::opt<bool> HexagonAllowScatterGatherHVX(
    "hexagon-allow-scatter-gather-hvx", cl::init(false), cl::Hidden,
    cl::desc("Allow auto-generation of HVX scatter-gather"));

static cl::opt<bool> EnableV68FloatAutoHVX(
    "force-hvx-float", cl::Hidden,
    cl::desc("Enable auto-vectorization of floating point types on v68."));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
    cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

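// A type qualifies as an HVX vector type if the subtarget can map it onto HVX
// registers; floating-point element types additionally require v69+, or v68
// together with the -force-hvx-float option.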
bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  if (!VecTy)
    return false;
  if (!ST.isTypeForHVX(VecTy))
    return false;
  if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
    return true;
  return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) const {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) const {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) != 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

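// Bias LSR towards creating post-increment opportunities: Hexagon has
// post-incrementing loads and stores, so post-indexed addressing is preferred.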
TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

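// ClassID 1 selects the vector register class: Hexagon provides 32 scalar
// registers, plus 32 HVX vector registers when HVX vectorization is enabled;
// otherwise the vector class is reported as empty.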
unsigned HexagonTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = ClassID == 1;
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

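// ST.getVectorLength() is the HVX vector size in bytes (64 or 128 depending on
// the HVX mode), so an HVX register is 512 or 1024 bits wide. The minimum VF
// below is one full register's worth of lanes; e.g. in 128-byte mode a 32-bit
// element width yields (8 * 128) / 32 = 32 lanes.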
ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) const {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) const {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        getTypeLegalizationCost(ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

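// Address computation is modeled as free: Hexagon's base+offset and
// post-increment addressing modes generally absorb it.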
InstructionCost
HexagonTTIImpl::getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                          const SCEV *S,
                                          TTI::TargetCostKind CostKind) const {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                Align Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) const {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
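      // For example, a 2048-bit vector built from 1024-bit HVX registers
      // costs 2.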
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      const Align RegAlign(RegWidth / 8);
      if (Alignment > RegAlign)
        Alignment = RegAlign;
      unsigned AlignWidth = 8 * Alignment.value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment, Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
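    // e.g. Align(1) gives LogA = 0 and a 3x multiplier per partial load, while
    // Align(2) gives LogA = 1 and a 2x multiplier.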
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *DstTy,
                                               VectorType *SrcTy, ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind, int Index,
                                               VectorType *SubTp, ArrayRef<const Value *> Args,
                                               const Instruction *CxtI) const {
  return 1;
}

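// An interleaved access group is only charged like an ordinary contiguous
// access of the whole vector type when every member of the group is used
// (Indices.size() == Factor) and no masking is involved; otherwise the generic
// estimate applies.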
InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
}

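// Non-HVX floating-point vector types get a saturated (maximal) cost here and
// in the arithmetic cost query below, steering the vectorizers away from them;
// FP compares that remain are charged FloatFactor per element on top of the
// legalization cost.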
InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                   Op1Info, Op2Info, I);
}

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  if (Ty->isVectorTy()) {
    if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

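// Casts involving floating point are charged the larger of the two
// legalization costs plus FloatFactor per FP element on either side; all
// other casts are a flat 1.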
InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) const {
  auto isNonHVXFP = [this] (Type *Ty) {
    return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
  };
  if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
    return InstructionCost::getMax();

  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

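// Element insertion and extraction are done with vector rotations on Hexagon:
// a non-zero lane index costs two rotations, and inserting a non-32-bit
// element additionally pays for an extract.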
InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index,
                                                   const Value *Op0,
                                                   const Value *Op1) const {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
                                     Index, Op0, Op1);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/,
                                        unsigned /*AddressSpace*/,
                                        TTI::MaskKind /*MaskKind*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/,
                                       unsigned /*AddressSpace*/,
                                       TTI::MaskKind /*MaskKind*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

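// Gather/scatter is only treated as legal for the HVX shapes that the
// HexagonVectorCombine pass can lower: 128 x i8, 64 x i16 (gathers also allow
// 32 x i16), and 32 x i32, with sufficient element alignment, and only when
// -hexagon-allow-scatter-gather-hvx is set.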
bool HexagonTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) const {
  // For now assume we can not deal with all HVX datatypes.
  if (!Ty->isVectorTy() || !ST.isTypeForHVX(Ty) ||
      !HexagonAllowScatterGatherHVX)
    return false;
  // This must be in sync with HexagonVectorCombine pass.
  switch (Ty->getScalarSizeInBits()) {
  case 8:
    return (getTypeNumElements(Ty) == 128);
  case 16:
    if (getTypeNumElements(Ty) == 64 || getTypeNumElements(Ty) == 32)
      return (Alignment >= 2);
    break;
  case 32:
    if (getTypeNumElements(Ty) == 32)
      return (Alignment >= 4);
    break;
  default:
    break;
  }
  return false;
}

bool HexagonTTIImpl::isLegalMaskedScatter(Type *Ty, Align Alignment) const {
  if (!Ty->isVectorTy() || !ST.isTypeForHVX(Ty) ||
      !HexagonAllowScatterGatherHVX)
    return false;
  // This must be in sync with HexagonVectorCombine pass.
  switch (Ty->getScalarSizeInBits()) {
  case 8:
    return (getTypeNumElements(Ty) == 128);
  case 16:
    if (getTypeNumElements(Ty) == 64)
      return (Alignment >= 2);
    break;
  case 32:
    if (getTypeNumElements(Ty) == 32)
      return (Alignment >= 4);
    break;
  default:
    break;
  }
  return false;
}

bool HexagonTTIImpl::forceScalarizeMaskedGather(VectorType *VTy,
                                                Align Alignment) const {
  return !isLegalMaskedGather(VTy, Alignment);
}

bool HexagonTTIImpl::forceScalarizeMaskedScatter(VectorType *VTy,
                                                 Align Alignment) const {
  return !isLegalMaskedScatter(VTy, Alignment);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

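// A zero/sign extension of a freshly loaded sub-32-bit integer to i32 is
// treated as free: Hexagon's byte and halfword loads (memb/memub, memh/memuh)
// already produce the extended 32-bit value, so the cast folds into the load.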
InstructionCost
HexagonTTIImpl::getInstructionCost(const User *U,
                                   ArrayRef<const Value *> Operands,
                                   TTI::TargetCostKind CostKind) const {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TTI::TCC_Free;
  return BaseT::getInstructionCost(U, Operands, CostKind);
}