//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EnableV68FloatAutoHVX(
    "force-hvx-float", cl::Hidden,
    cl::desc("Enable auto-vectorization of floating point types on v68."));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
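// (For example, with FloatFactor = 4, each floating-point vector element adds
// four extra units of cost in the FP cost formulas below.)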
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  if (!VecTy)
    return false;
  if (!ST.isTypeForHVX(VecTy))
    return false;
  if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
    return true;
  return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

/// Bias LSR towards creating post-increment opportunities.
TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
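  // For example, in 128-byte HVX mode (ST.getVectorLength() == 128), 16-bit
  // elements give a minimum VF of (8 * 128) / 16 = 64 lanes (illustrative;
  // the vector length depends on the subtarget configuration).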
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        getTypeLegalizationCost(ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                          ScalarEvolution *SE,
                                                          const SCEV *S) {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
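      // For example, a 2048-bit vector in 1024-bit HVX registers takes
      // 2048 / 1024 = 2 register-wide loads, so the cost below is 2
      // (illustrative widths; the register width comes from the subtarget).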
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
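      // The factor of 3 below is a heuristic per-piece cost for loading and
      // then assembling the pieces into a full HVX register; like FloatFactor
      // above, it is not a measured cycle count.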
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
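    // For example, a 128-bit integer vector with Align(2): AlignWidth = 16,
    // NumLoads = 128 / 16 = 8, LogA = 1, giving (3 - 1) * 1 * 8 = 16.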
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                               ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind,
                                               int Index, Type *SubTp,
                                               ArrayRef<const Value *> Args,
                                               const Instruction *CxtI) {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
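  // Only a fully-covered interleave group (one index per member, no masking)
  // is costed below as a single wide memory operation; anything partial or
  // masked falls back to the generic implementation.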
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
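    // For example, an fcmp on <2 x double> that legalizes in one step
    // (LT.first == 1) costs 1 + FloatFactor * 2 = 9 (illustrative; the
    // legalization count depends on the subtarget).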
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  if (Ty->isVectorTy()) {
    if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  auto isNonHVXFP = [this](Type *Ty) {
    return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
  };
  if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
    return InstructionCost::getMax();

  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
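    // For example, sitofp from <4 x i32> to <4 x float> gives SrcN = 0 and
    // DstN = 4, so Cost = max(SrcLT.first, DstLT.first) + 4 * 4 (illustrative;
    // the legalization counts depend on the subtarget).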
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index, Value *Op0,
                                                   Value *Op1) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
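    // For example, inserting an i32 at index 0 is free, while inserting an
    // i16 at a non-zero index pays the two rotations plus the extract cost
    // computed below.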
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
                                     Index, Op0, Op1);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

InstructionCost
HexagonTTIImpl::getInstructionCost(const User *U,
                                   ArrayRef<const Value *> Operands,
                                   TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

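  // For example, "%e = zext i16 %v to i32", where %v is a load with a single
  // use, is treated as free: the extension folds into the load instruction.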
  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TTI::TCC_Free;
  return BaseT::getInstructionCost(U, Operands, CostKind);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}