//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
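// Example (a minimal illustrative sketch; `MyPass` is hypothetical): IR-level
// passes do not construct X86TTIImpl directly. They request the
// TargetIRAnalysis result, which dispatches to this implementation when the
// module targets x86.
//
// \code
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
//     // On x86 this query is answered by X86TTIImpl::getCacheSize().
//     std::optional<unsigned> L1Size =
//         TTI.getCacheSize(TargetTransformInfo::CacheLevel::L1D);
//     // ... use L1Size to steer the transformation ...
//     return PreservedAnalyses::all();
//   }
// \endcode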

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include <optional>

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  /// Target features and tuning flags that are deliberately ignored when
  /// deciding whether inlining across differing target attributes is
  /// compatible (a usage sketch follows the list).
  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::FeatureX86_64,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCX16,
      X86::FeatureLAHFSAHF64,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,
      X86::TuningFastDPWSSD,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };
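  // A minimal sketch of how this list is consumed (the actual logic lives in
  // X86TargetTransformInfo.cpp and may differ in detail): the inline
  // compatibility check compares caller and callee feature bits with the
  // ignored bits masked out, so a mismatch on a pure tuning flag alone does
  // not block inlining.
  //
  // \code
  //   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  //   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  //   // Compatible if the callee's remaining features are a subset of the
  //   // caller's.
  //   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
  // \endcode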

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  std::optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(ElementCount VF);
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const;

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = {},
                                 const Instruction *CxtI = nullptr);
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);
  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr);
  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind,
                                           ArrayRef<Value *> VL = {});
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind);
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);
  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind);
  InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF);

  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);

  /// Calculate the cost of materializing a 64-bit value.
  InstructionCost getIntImmCost(int64_t);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) const;
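  // A worked example (hypothetical values; `TTI` and `Ctx` assumed in scope):
  // for an i32 load addressed as [BaseReg + 4*IndexReg + 16], a query would
  // pass HasBaseReg = true, Scale = 4 and BaseOffset = 16. x86 encodes scales
  // of 1, 2, 4 and 8 directly in the SIB byte, so such a mode is expected to
  // be supported.
  //
  // \code
  //   InstructionCost Cost = TTI.getScalingFactorCost(
  //       Type::getInt32Ty(Ctx), /*BaseGV=*/nullptr,
  //       StackOffset::getFixed(16), /*HasBaseReg=*/true, /*Scale=*/4,
  //       /*AddrSpace=*/0);
  //   bool Supported = Cost >= 0; // a negative cost marks an unsupported mode
  // \endcode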

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment);
  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment);
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isExpensiveToSpeculativelyExecute(const Instruction *I);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Type) const;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
    return ST->getMaxInlineSizeThreshold();
  }

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool prefersVectorizedAddressing() const;
  bool supportsEfficientVectorElementLoadStore() const;
  bool enableInterleavedAccessVectorization();

  InstructionCost getBranchMispredictPenalty() const;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const;

  bool isVectorShiftByScalarCheap(Type *Ty) const;

  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const;

private:
  bool supportsGather() const;
  InstructionCost getGSVectorCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  Type *DataTy, const Value *Ptr,
                                  Align Alignment, unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif