#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
// ...
for (const Value *Operand : Operands)
// ...
return SI.getNumCases();
// ...
virtual std::pair<KnownBits, KnownBits>
// ...
         "expected pointer or pointer vector type");
// ...
  if (DL.isNonIntegralAddressSpace(FromAS))
    return std::pair(KnownBits(DL.getPointerSizeInBits(FromAS)),
// ...
      CastI->getDestAddressSpace(), *CastI->getPointerOperand());
  FromPtrBits = KB.second;
// ...
  return {FromPtrBits, ToPtrBits};
// ...
  unsigned ToASBitSize = DL.getPointerSizeInBits(ToAS);
// ...
  if (DL.isNonIntegralAddressSpace(FromAS))
// ...
virtual std::pair<const Value *, unsigned>
// ...
  return std::make_pair(nullptr, -1);
// ...
assert(F && "A concrete function must be provided to this routine.");
// ...
if (F->isIntrinsic())
// ...
if (F->hasLocalLinkage() || !F->hasName())
// ...
if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
    Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
    Name == "fmin" || Name == "fminf" || Name == "fminl" ||
    Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
    Name == "sin" || Name == "sinf" || Name == "sinl" ||
    Name == "cos" || Name == "cosf" || Name == "cosl" ||
    Name == "tan" || Name == "tanf" || Name == "tanl" ||
    Name == "asin" || Name == "asinf" || Name == "asinl" ||
    Name == "acos" || Name == "acosf" || Name == "acosl" ||
    Name == "atan" || Name == "atanf" || Name == "atanl" ||
    Name == "atan2" || Name == "atan2f" || Name == "atan2l" ||
    Name == "sinh" || Name == "sinhf" || Name == "sinhl" ||
    Name == "cosh" || Name == "coshf" || Name == "coshl" ||
    Name == "tanh" || Name == "tanhf" || Name == "tanhl" ||
    Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" ||
    Name == "exp10" || Name == "exp10l" || Name == "exp10f")
// ...
if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
    Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
    Name == "floorf" || Name == "ceil" || Name == "round" ||
    Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
// ...
// ...
virtual std::optional<Instruction *>
// ...
virtual std::optional<Value *>
// ...
    bool &KnownBitsComputed) const {
// ...
    SimplifyAndSetOp) const {
// ...
    int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace,
// ...
    int64_t ScalableOffset = 0) const {
// ...
  return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
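// --- Illustrative example (not part of the original header) ---
// Hedged sketch of the default isLegalAddressingMode() policy returned
// above: with no target override, only "reg" (Scale == 0) and "reg + reg"
// (Scale == 1) with no global base and no immediate offset are accepted.
// The free function and its parameter types are illustrative only.
#include <cstdint>

static bool defaultAddrModeIsLegal(const void *BaseGV, std::int64_t BaseOffset,
                                   std::int64_t Scale) {
  // Mirrors: !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1)
  return BaseGV == nullptr && BaseOffset == 0 && (Scale == 0 || Scale == 1);
}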
// ...
unsigned DataSize = DL.getTypeStoreSize(DataType);
// ...
unsigned DataSize = DL.getTypeStoreSize(DataType);
// ...
    Align Alignment) const {
// ...
    Align Alignment) const {
// ...
    Align Alignment) const {
// ...
    unsigned AddrSpace) const {
// ...
    Type *DataType) const {
// ...
    bool HasBaseReg, int64_t Scale,
    unsigned AddrSpace) const {
// ...
    Scale, AddrSpace, nullptr,
// ...
virtual bool useAA() const { return false; }
// ...
    unsigned ScalarOpdIdx) const {
// ...
    unsigned *Fast) const {
// ...
    Type *Ty = nullptr) const {
// ...
  return "Generic::Unknown Register Class";
// ...
  return "Generic::ScalarRC";
// ...
  return "Generic::VectorRC";
// ...
virtual std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
// ...
virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
// ...
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  AllowPromotionWithoutCommonHeader = false;
// ...
virtual std::optional<unsigned>
// ...
virtual std::optional<unsigned>
// ...
    unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
// ...
    unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
// ...
auto IsWidenableCondition = [](const Value *V) {
// ...
  if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
// ...
case Instruction::FDiv:
case Instruction::FRem:
case Instruction::SDiv:
case Instruction::SRem:
case Instruction::UDiv:
case Instruction::URem:
// ...
case Instruction::And:
case Instruction::Or:
  if (any_of(Args, IsWidenableCondition))
// ...
if (Ty->getScalarType()->isFloatingPointTy())
// ...
case Instruction::IntToPtr: {
  unsigned SrcSize = Src->getScalarSizeInBits();
  if (DL.isLegalInteger(SrcSize) &&
      SrcSize <= DL.getPointerTypeSizeInBits(Dst))
// ...
case Instruction::PtrToAddr: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  assert(DstSize == DL.getAddressSizeInBits(Src));
  if (DL.isLegalInteger(DstSize))
// ...
case Instruction::PtrToInt: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  if (DL.isLegalInteger(DstSize) &&
      DstSize >= DL.getPointerTypeSizeInBits(Src))
// ...
case Instruction::BitCast:
  if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
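// --- Illustrative example (not part of the original header) ---
// Sketch of the "free cast" rules above under a simplified data-layout
// model: inttoptr is free when the source is a legal integer no wider than
// the pointer, ptrtoint is free when the destination is a legal integer at
// least as wide as the pointer, and pointer-to-pointer bitcasts are free.
// LegalIntBits and PtrBits stand in for the DataLayout queries.
#include <algorithm>
#include <vector>

enum class CastKind { IntToPtr, PtrToInt, PtrBitCast };

static bool castIsFree(CastKind K, unsigned OtherBits, unsigned PtrBits,
                       const std::vector<unsigned> &LegalIntBits) {
  auto IsLegalInt = [&](unsigned Bits) {
    return std::find(LegalIntBits.begin(), LegalIntBits.end(), Bits) !=
           LegalIntBits.end();
  };
  switch (K) {
  case CastKind::IntToPtr:
    return IsLegalInt(OtherBits) && OtherBits <= PtrBits;
  case CastKind::PtrToInt:
    return IsLegalInt(OtherBits) && OtherBits >= PtrBits;
  case CastKind::PtrBitCast:
    return true; // same-size pointers, no work
  }
  return false;
}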
// ...
case Instruction::Trunc: {
// ...
    unsigned Index, const Value *Op0,
    const Value *Op1) const {
// ...
    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
// ...
    unsigned Index) const {
// ...
    unsigned Index) const {
// ...
    const APInt &DemandedDstElts,
// ...
  if (Opcode == Instruction::InsertValue &&
// ...
    bool UseMaskForCond, bool UseMaskForGaps) const {
// ...
switch (ICA.getID()) {
// ...
case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
case Intrinsic::annotation:
case Intrinsic::assume:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::dbg_assign:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::dbg_label:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::is_constant:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::objectsize:
case Intrinsic::ptr_annotation:
case Intrinsic::var_annotation:
case Intrinsic::experimental_gc_result:
case Intrinsic::experimental_gc_relocate:
case Intrinsic::coro_alloc:
case Intrinsic::coro_begin:
case Intrinsic::coro_begin_custom_abi:
case Intrinsic::coro_free:
case Intrinsic::coro_end:
case Intrinsic::coro_frame:
case Intrinsic::coro_size:
case Intrinsic::coro_align:
case Intrinsic::coro_suspend:
case Intrinsic::coro_subfn_addr:
case Intrinsic::threadlocal_address:
case Intrinsic::experimental_widenable_condition:
case Intrinsic::ssa_copy:
// ...
switch (MICA.getID()) {
case Intrinsic::masked_scatter:
case Intrinsic::masked_gather:
case Intrinsic::masked_load:
case Intrinsic::masked_store:
case Intrinsic::vp_scatter:
case Intrinsic::vp_gather:
case Intrinsic::masked_compressstore:
case Intrinsic::masked_expandload:
// ...
    std::optional<FastMathFlags> FMF,
// ...
    VectorType *Ty, std::optional<FastMathFlags> FMF,
// ...
    bool CanCreate = true) const {
// ...
    unsigned SrcAddrSpace, unsigned DestAddrSpace,
// ...
    std::optional<uint32_t> AtomicElementSize) const {
  return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
// ...
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
// ...
    std::optional<uint32_t> AtomicCpySize) const {
  unsigned OpSizeInBytes = AtomicCpySize.value_or(1);
// ...
  for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
// ...
  return (Caller->getFnAttribute("target-cpu") ==
          Callee->getFnAttribute("target-cpu")) &&
         (Caller->getFnAttribute("target-features") ==
          Callee->getFnAttribute("target-features"));
// ...
    unsigned DefaultCallPenalty) const {
  return DefaultCallPenalty;
// ...
  return (Caller->getFnAttribute("target-cpu") ==
          Callee->getFnAttribute("target-cpu")) &&
         (Caller->getFnAttribute("target-features") ==
          Callee->getFnAttribute("target-features"));
// ...
    unsigned AddrSpace) const {
// ...
    unsigned AddrSpace) const {
// ...
    unsigned ChainSizeInBytes,
// ...
    unsigned ChainSizeInBytes,
// ...
unsigned MaxRequiredSize =
    VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();
// ...
unsigned MinRequiredSize = 0;
for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
  if (auto *IntElement =
// ...
    bool signedElement = IntElement->getValue().isNegative();
// ...
    unsigned ElementMinRequiredSize =
        IntElement->getValue().getSignificantBits() - 1;
// ...
    isSigned |= signedElement;
// ...
    MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
// ...
  return MaxRequiredSize;
// ...
return MinRequiredSize;
// ...
isSigned = CI->getValue().isNegative();
return CI->getValue().getSignificantBits() - 1;
// ...
return Cast->getSrcTy()->getScalarSizeInBits() - 1;
// ...
return Cast->getSrcTy()->getScalarSizeInBits();
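// --- Illustrative example (not part of the original header) ---
// Sketch of the element scan above: over a vector of integer constants it
// records whether any element is negative and the widest
// "significant bits - 1" (i.e. magnitude bits, sign bit excluded) needed by
// any element. Plain int64_t stands in for APInt here.
#include <algorithm>
#include <cstdint>
#include <vector>

static unsigned minRequiredBitsSketch(const std::vector<std::int64_t> &Elts,
                                      bool &IsSigned) {
  unsigned MinRequiredSize = 0;
  for (std::int64_t V : Elts) {
    IsSigned |= (V < 0);
    // Equivalent of getSignificantBits() - 1 for a two's-complement value.
    std::uint64_t Mag =
        V < 0 ? ~static_cast<std::uint64_t>(V) : static_cast<std::uint64_t>(V);
    unsigned Bits = 0;
    while (Mag >> Bits)
      ++Bits;
    MinRequiredSize = std::max(MinRequiredSize, Bits);
  }
  return MinRequiredSize;
}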
// ...
    const SCEV *Ptr) const {
// ...
    int64_t MergeDistance) const {
// ...
template <typename T>
// ...
  assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
// ...
  bool HasBaseReg = (BaseGV == nullptr);
// ...
  auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
  APInt BaseOffset(PtrSizeBits, 0);
// ...
  Type *TargetType = nullptr;
// ...
  if (Operands.empty())
// ...
  for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
    TargetType = GTI.getIndexedType();
// ...
    if (StructType *STy = GTI.getStructTypeOrNull()) {
// ...
      assert(ConstIdx && "Unexpected GEP index");
// ...
      BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
// ...
      int64_t ElementSize =
          GTI.getSequentialElementStride(DL).getFixedValue();
// ...
      Scale = ElementSize;
// ...
  AccessType = TargetType;
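// --- Illustrative example (not part of the original header) ---
// Reduced sketch of the folding loop above: a GEP is flattened into
// Base + BaseOffset + Index * Scale so it can be handed to
// isLegalAddressingMode(). Struct fields and constant indices fold into the
// byte offset; at most one variable index can occupy the scaled register.
// The GepStep record is hypothetical.
#include <cstdint>
#include <optional>
#include <vector>

struct GepStep {
  bool IsStructField;                   // struct member vs. sequential index
  std::int64_t StructFieldOffset = 0;   // byte offset when IsStructField
  std::int64_t ElementSize = 0;         // byte stride when !IsStructField
  std::optional<std::int64_t> ConstIdx; // constant index, if known
};

static bool foldGepSketch(const std::vector<GepStep> &Steps,
                          std::int64_t &BaseOffset, std::int64_t &Scale) {
  for (const GepStep &S : Steps) {
    if (S.IsStructField)
      BaseOffset += S.StructFieldOffset;
    else if (S.ConstIdx)
      BaseOffset += *S.ConstIdx * S.ElementSize;
    else if (Scale == 0)
      Scale = S.ElementSize; // one variable index gets the scale
    else
      return false; // can't model a second variable index
  }
  return true;
}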
// ...
  for (const Value *V : Ptrs) {
// ...
    if (Info.isSameBase() && V != Base) {
      if (GEP->hasAllConstantIndices())
// ...
        {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
// ...
        GEP->getSourceElementType(), GEP->getPointerOperand(), Indices,
// ...
  auto *TargetTTI = static_cast<const T *>(this);
// ...
  if (const Function *F = CB->getCalledFunction()) {
    if (!TargetTTI->isLoweredToCall(F))
// ...
  Type *Ty = U->getType();
// ...
  case Instruction::Call: {
// ...
    return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
// ...
  case Instruction::Br:
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Switch:
    return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
  case Instruction::Freeze:
// ...
  case Instruction::ExtractValue:
  case Instruction::InsertValue:
    return TargetTTI->getInsertExtractValueCost(Opcode, CostKind);
  case Instruction::Alloca:
// ...
  case Instruction::GetElementPtr: {
// ...
    Type *AccessType = nullptr;
// ...
    if (GEP->hasOneUser() && I)
      AccessType = I->user_back()->getAccessType();
// ...
    return TargetTTI->getGEPCost(GEP->getSourceElementType(),
// ...
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg: {
// ...
    if (Opcode != Instruction::FNeg)
// ...
    return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                             Op2Info, Operands, I);
// ...
  case Instruction::IntToPtr:
  case Instruction::PtrToAddr:
  case Instruction::PtrToInt:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::FPExt:
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::AddrSpaceCast: {
    Type *OpTy = Operands[0]->getType();
    return TargetTTI->getCastInstrCost(
// ...
  case Instruction::Store: {
// ...
    Type *ValTy = Operands[0]->getType();
// ...
    return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
// ...
  case Instruction::Load: {
// ...
    Type *LoadType = U->getType();
// ...
      LoadType = TI->getDestTy();
// ...
    return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
// ...
                                      {TTI::OK_AnyValue, TTI::OP_None}, I);
// ...
  case Instruction::Select: {
    const Value *Op0, *Op1;
// ...
      return TargetTTI->getArithmeticInstrCost(
// ...
          CostKind, Op1Info, Op2Info, Operands, I);
// ...
    Type *CondTy = Operands[0]->getType();
    return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
// ...
  case Instruction::ICmp:
  case Instruction::FCmp: {
// ...
    Type *ValTy = Operands[0]->getType();
// ...
    return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
// ...
  case Instruction::InsertElement: {
// ...
    if (CI->getValue().getActiveBits() <= 32)
      Idx = CI->getZExtValue();
    return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx);
// ...
  case Instruction::ShuffleVector: {
// ...
    int NumSubElts, SubIndex;
// ...
    if (all_of(Mask, [](int M) { return M < 0; }))
// ...
    if (Shuffle->changesLength()) {
// ...
      if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
// ...
      if (Shuffle->isExtractSubvectorMask(SubIndex))
// ...
            VecSrcTy, Mask, CostKind, SubIndex,
            VecTy, Operands, Shuffle);
// ...
      if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
        return TargetTTI->getShuffleCost(
// ...
      int ReplicationFactor, VF;
      if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
// ...
          DemandedDstElts.setBit(I.index());
// ...
        return TargetTTI->getReplicationShuffleCost(
            VecSrcTy->getElementType(), ReplicationFactor, VF,
// ...
      NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();
// ...
      if (Shuffle->increasesLength()) {
        for (int &M : AdjustMask)
          M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;
// ...
        return TargetTTI->getShuffleCost(
// ...
            VecTy, AdjustMask, CostKind, 0, nullptr, Operands, Shuffle);
// ...
          VecSrcTy, VecSrcTy, AdjustMask, CostKind, 0, nullptr, Operands,
// ...
      std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
      return ShuffleCost + TargetTTI->getShuffleCost(
// ...
          ExtractMask, CostKind, 0, VecTy, {}, Shuffle);
// ...
    if (Shuffle->isIdentity())
// ...
    if (Shuffle->isReverse())
      return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, VecSrcTy, Mask,
// ...
    if (Shuffle->isTranspose())
// ...
          Mask, CostKind, 0, nullptr, Operands,
// ...
    if (Shuffle->isZeroEltSplat())
// ...
          Mask, CostKind, 0, nullptr, Operands,
// ...
    if (Shuffle->isSingleSource())
// ...
          VecSrcTy, Mask, CostKind, 0, nullptr,
// ...
    if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
      return TargetTTI->getShuffleCost(
// ...
    if (Shuffle->isSelect())
      return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, VecSrcTy, Mask,
// ...
    if (Shuffle->isSplice(SubIndex))
      return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, VecSrcTy, Mask,
                                       CostKind, SubIndex, nullptr, Operands,
// ...
        Mask, CostKind, 0, nullptr, Operands,
// ...
  case Instruction::ExtractElement: {
// ...
    if (CI->getValue().getActiveBits() <= 32)
      Idx = CI->getZExtValue();
    Type *DstTy = Operands[0]->getType();
    return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
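// --- Illustrative example (not part of the original header) ---
// The ShuffleVector case above classifies the constant mask via the chain
// of Shuffle->is*() queries to choose a TTI::ShuffleKind. A standalone
// sketch of three of those checks, where Mask[i] == -1 marks a poison lane:
#include <vector>

static bool isIdentityMaskSketch(const std::vector<int> &Mask) {
  for (int i = 0, e = static_cast<int>(Mask.size()); i != e; ++i)
    if (Mask[i] != -1 && Mask[i] != i)
      return false;
  return true;
}

static bool isReverseMaskSketch(const std::vector<int> &Mask) {
  int e = static_cast<int>(Mask.size());
  for (int i = 0; i != e; ++i)
    if (Mask[i] != -1 && Mask[i] != e - 1 - i)
      return false;
  return true;
}

static bool isZeroEltSplatMaskSketch(const std::vector<int> &Mask) {
  for (int M : Mask)
    if (M != -1 && M != 0)
      return false;
  return true;
}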
// ...
auto *TargetTTI = static_cast<const T *>(this);
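// --- Illustrative example (not part of the original header) ---
// The static_cast<const T *>(this) above is the CRTP hook: the generic
// implementation dispatches back into the most-derived target TTI class
// without virtual calls. A minimal sketch of the pattern (names are
// hypothetical):
#include <cstdio>

template <typename T> struct BaseTTISketch {
  int getInstructionCost() const {
    // Calls the derived override if one exists, else the default below.
    return static_cast<const T *>(this)->getArithmeticInstrCost();
  }
  int getArithmeticInstrCost() const { return 1; } // generic default
};

struct MyTargetTTISketch : BaseTTISketch<MyTargetTTISketch> {
  int getArithmeticInstrCost() const { return 3; } // target-specific cost
};

int main() {
  MyTargetTTISketch TTI;
  std::printf("%d\n", TTI.getInstructionCost()); // prints 3
}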