#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
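//
// TargetTransformInfoImplBase supplies the default, deliberately
// conservative implementations of the TargetTransformInfo hooks; a concrete
// target overrides individual hooks to expose accurate costs and legality.
//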
// ... (default getGEPCost: all-constant GEPs are assumed free)
for (const Value *Operand : Operands)
  if (!isa<Constant>(Operand))
    return TTI::TCC_Basic;
return TTI::TCC_Free;

// ... (default getEstimatedNumberOfCaseClusters: one cluster per case)
return SI.getNumCases();
virtual std::pair<const Value *, unsigned>
getPredicatedAddrSpace(const Value *V) const {
  return std::make_pair(nullptr, -1);
}
virtual bool isLoweredToCall(const Function *F) const {
  assert(F && "A concrete function must be provided to this routine.");

  // Intrinsics are never treated as real calls here.
  if (F->isIntrinsic())
    return false;

  if (F->hasLocalLinkage() || !F->hasName())
    return true;
  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "fmin" || Name == "fminf" || Name == "fminl" ||
      Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "tan" || Name == "tanf" || Name == "tanl" ||
      Name == "asin" || Name == "asinf" || Name == "asinl" ||
      Name == "acos" || Name == "acosf" || Name == "acosl" ||
      Name == "atan" || Name == "atanf" || Name == "atanl" ||
      Name == "atan2" || Name == "atan2f" || Name == "atan2l" ||
      Name == "sinh" || Name == "sinhf" || Name == "sinhl" ||
      Name == "cosh" || Name == "coshf" || Name == "coshl" ||
      Name == "tanh" || Name == "tanhf" || Name == "tanhl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" ||
      Name == "exp10" || Name == "exp10l" || Name == "exp10f")
    return false;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
      Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
      Name == "floorf" || Name == "ceil" || Name == "round" ||
      Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
      Name == "llabs")
    return false;
virtual std::optional<Instruction *>
instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  return std::nullopt;
}
virtual std::optional<Value *>
simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                 APInt DemandedMask, KnownBits &Known,
                                 bool &KnownBitsComputed) const {
  return std::nullopt;
}
// ... (simplifyDemandedVectorEltsIntrinsic takes a SimplifyAndSetOp
//      callback and likewise returns std::nullopt by default)
virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale, unsigned AddrSpace,
                                   Instruction *I = nullptr,
                                   int64_t ScalableOffset = 0) const {
  // Guess conservatively: claim only the trivial addressing modes.
  return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
}
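// Reading the default: "[BaseReg]" (Scale == 0) and "[BaseReg + IndexReg]"
// (Scale == 1) are accepted; any mode with a global base, a nonzero constant
// offset, or a scaled index such as "[BaseReg + 4*IndexReg]" is rejected
// until a target overrides this hook.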
// isLegalNTStore: nontemporal stores are assumed available when the access
// is aligned and its size is a power of two.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);

// ... isLegalNTLoad applies the same check:
unsigned DataSize = DL.getTypeStoreSize(DataType);
// ... (a run of vector-memory legality hooks follows; their signatures end
//      in "Align Alignment) const", "unsigned AddrSpace) const", or
//      "Type *DataType) const", and each conservatively returns false by
//      default)
virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                             StackOffset BaseOffset,
                                             bool HasBaseReg, int64_t Scale,
                                             unsigned AddrSpace) const {
  // Any addressing mode the target reports as legal is assumed free.
  if (isLegalAddressingMode(Ty, BaseGV, BaseOffset.getFixed(), HasBaseReg,
                            Scale, AddrSpace, /*I=*/nullptr,
                            BaseOffset.getScalable()))
    return 0;
  return InstructionCost::getInvalid();
}

virtual bool useAA() const { return false; }
// ... (isTargetIntrinsicWithScalarOpAtArg: Intrinsic::ID ID,
//      unsigned ScalarOpdIdx) const
// ... (allowsMisalignedMemoryAccesses: ..., unsigned *Fast) const
// ... (getRegisterClassForType: bool Vector, Type *Ty = nullptr) const
578 return "Generic::Unknown Register Class";
580 return "Generic::ScalarRC";
582 return "Generic::VectorRC";
593 virtual std::optional<unsigned>
getMaxVScale()
const {
return std::nullopt; }
virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
  return 0;
}

// ...
virtual bool shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  AllowPromotionWithoutCommonHeader = false;
  return false;
}
virtual std::optional<unsigned>
getCacheSize(TargetTransformInfo::CacheLevel Level) const {
  return std::nullopt;
}
virtual std::optional<unsigned>
getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
  return std::nullopt;
}

// ... (getMinPrefetchStride: unsigned NumMemAccesses,
//      unsigned NumStridedMemAccesses, unsigned NumPrefetches,
//      bool HasCall) const
// ... (getPartialReductionCost: unsigned Opcode, Type *InputTypeA,
//      Type *InputTypeB, Type *AccumType, ...)
// Widenable conditions eventually lower into constants, so some operations
// on them are trivially optimized away.
auto IsWidenableCondition = [](const Value *V) {
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
      return true;
  return false;
};
switch (Opcode) {
default:
  break;
case Instruction::FDiv:
case Instruction::FRem:
case Instruction::SDiv:
case Instruction::SRem:
case Instruction::UDiv:
case Instruction::URem:
  return TTI::TCC_Expensive;
case Instruction::And:
case Instruction::Or:
  if (any_of(Args, IsWidenableCondition))
    return TTI::TCC_Free;
  break;
}

// Assume a higher latency for floating-point arithmetic.
if (CostKind == TTI::TCK_Latency)
  if (Ty->getScalarType()->isFloatingPointTy())
    return 3;
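// Reading the defaults above: all division/remainder flavors are flagged
// TTI::TCC_Expensive since most targets expand or microcode them, while an
// `and`/`or` fed by llvm.experimental.widenable.condition is free because
// guard widening will fold that condition to a constant.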
case Instruction::IntToPtr: {
  unsigned SrcSize = Src->getScalarSizeInBits();
  if (DL.isLegalInteger(SrcSize) &&
      SrcSize <= DL.getPointerTypeSizeInBits(Dst))
    return 0;
  break;
}
case Instruction::PtrToAddr: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  assert(DstSize == DL.getAddressSizeInBits(Src));
  if (DL.isLegalInteger(DstSize))
    return 0;
  break;
}
case Instruction::PtrToInt: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  if (DL.isLegalInteger(DstSize) &&
      DstSize >= DL.getPointerTypeSizeInBits(Src))
    return 0;
  break;
}
case Instruction::BitCast:
  // Identity and pointer-to-pointer casts are free.
  if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
    return 0;
  break;
case Instruction::Trunc: {
  // trunc to a native type is free (assuming the target has compare and
  // shift-right of the same width).
  if (DL.isLegalInteger(DL.getTypeSizeInBits(Dst)))
    return 0;
  break;
}
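// Worked example (assuming a 64-bit DataLayout with native i32/i64):
// ptrtoint i8* -> i64 is free (i64 is legal and covers the pointer width);
// ptrtoint i8* -> i128 is not (i128 is illegal); trunc i64 -> i32 is free;
// trunc i64 -> i9 falls through to the generic cast handling.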
// ... (fragments of the getVectorInstrCost overloads: one taking
//      "unsigned Index, const Value *Op0, const Value *Op1", one taking
//      "ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx", and
//      instruction-based forms taking "unsigned Index") const
// ... (getReplicationShuffleCost: ..., const APInt &DemandedDstElts, ...)
// ... (getInsertExtractValueCost special-cases insertvalue via
//      "if (Opcode == Instruction::InsertValue && ...")
// ... (getInterleavedMemoryOpCost: ..., bool UseMaskForCond,
//      bool UseMaskForGaps) const
switch (ICA.getID()) {
default:
  break;
case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
case Intrinsic::annotation:
case Intrinsic::assume:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::dbg_assign:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::dbg_label:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::is_constant:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::objectsize:
case Intrinsic::ptr_annotation:
case Intrinsic::var_annotation:
case Intrinsic::experimental_gc_result:
case Intrinsic::experimental_gc_relocate:
case Intrinsic::coro_alloc:
case Intrinsic::coro_begin:
case Intrinsic::coro_begin_custom_abi:
case Intrinsic::coro_free:
case Intrinsic::coro_end:
case Intrinsic::coro_frame:
case Intrinsic::coro_size:
case Intrinsic::coro_align:
case Intrinsic::coro_suspend:
case Intrinsic::coro_subfn_addr:
case Intrinsic::threadlocal_address:
case Intrinsic::experimental_widenable_condition:
case Intrinsic::ssa_copy:
  // These intrinsics don't actually represent code after lowering.
  return 0;
}
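// Hedged example: under these defaults "call void @llvm.assume(i1 %c)"
// contributes 0 to every cost kind, because the call disappears entirely
// during lowering.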
// Memory-intrinsic cost dispatch:
switch (MICA.getID()) {
case Intrinsic::masked_scatter:
case Intrinsic::masked_gather:
case Intrinsic::masked_load:
case Intrinsic::masked_store:
case Intrinsic::vp_scatter:
case Intrinsic::vp_gather:
case Intrinsic::masked_compressstore:
case Intrinsic::masked_expandload:
  // ...
// ... (the reduction-cost hooks take "std::optional<FastMathFlags> FMF":
//      one as "..., std::optional<FastMathFlags> FMF, ..." and one as
//      "VectorType *Ty, std::optional<FastMathFlags> FMF, ...")
// ... (..., bool CanCreate = true) const
virtual Type *getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
                           : Type::getInt8Ty(Context);
}

virtual void getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {
  unsigned OpSizeInBytes = AtomicCpySize.value_or(1);
  Type *OpType = Type::getIntNTy(Context, OpSizeInBytes * 8);
  for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
    OpsOut.push_back(OpType);
}
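// Worked example: RemainingBytes == 7 with AtomicCpySize == std::nullopt
// appends seven i8 types (seven byte copies); RemainingBytes == 8 with
// AtomicCpySize == 4 appends two i32 types. Note the "i != RemainingBytes"
// condition assumes the byte count is a multiple of OpSizeInBytes.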
virtual bool areInlineCompatible(const Function *Caller,
                                 const Function *Callee) const {
  return (Caller->getFnAttribute("target-cpu") ==
          Callee->getFnAttribute("target-cpu")) &&
         (Caller->getFnAttribute("target-features") ==
          Callee->getFnAttribute("target-features"));
}

virtual unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
                                      unsigned DefaultCallPenalty) const {
  return DefaultCallPenalty;
}
virtual bool areTypesABICompatible(const Function *Caller,
                                   const Function *Callee,
                                   const ArrayRef<Type *> &Types) const {
  return (Caller->getFnAttribute("target-cpu") ==
          Callee->getFnAttribute("target-cpu")) &&
         (Caller->getFnAttribute("target-features") ==
          Callee->getFnAttribute("target-features"));
}
// ... (isLegalToVectorizeLoadChain / isLegalToVectorizeStoreChain:
//      unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
// ... (getLoadVectorFactor / getStoreVectorFactor: unsigned VF,
//      unsigned Load/StoreSize, unsigned ChainSizeInBytes,
//      VectorType *VecTy) const
// minRequiredElementSize: the narrowest element width that can represent a
// constant vector (or scalar) operand.
unsigned MaxRequiredSize =
    VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();

unsigned MinRequiredSize = 0;
for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
  if (auto *IntElement =
          dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
    bool signedElement = IntElement->getValue().isNegative();
    unsigned ElementMinRequiredSize =
        IntElement->getValue().getSignificantBits() - 1;
    // One signed element makes the whole vector signed.
    isSigned |= signedElement;
    MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
  } else {
    // Not an integer constant element; fall back to the full element width.
    return MaxRequiredSize;
  }
}
return MinRequiredSize;

// ... scalar constants and extensions:
if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
  isSigned = CI->getValue().isNegative();
  return CI->getValue().getSignificantBits() - 1;
}
if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
  isSigned = true;
  return Cast->getSrcTy()->getScalarSizeInBits() - 1;
}
if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
  isSigned = false;
  return Cast->getSrcTy()->getScalarSizeInBits();
}
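// Worked example: for <4 x i32> <i32 1, i32 2, i32 -3, i32 4> the
// per-element widths (getSignificantBits() - 1) are 1, 2, 2 and 3 bits, so
// the helper returns 3 with isSigned == true, versus MaxRequiredSize == 32.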
// ... (getAddressComputationCost: ..., const SCEV *Ptr) const
// ... (..., int64_t MergeDistance) const
/// CRTP base class that implements the generic cost queries (such as
/// getInstructionCost and getGEPCost) on top of the concrete target's hooks.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                           ArrayRef<const Value *> Operands, Type *AccessType,
                           TTI::TargetCostKind CostKind) const {
  assert(PointeeType && Ptr && "can't get GEPCost of nullptr");

  auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
  bool HasBaseReg = (BaseGV == nullptr);

  auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
  APInt BaseOffset(PtrSizeBits, 0);
  int64_t Scale = 0;

  auto GTI = gep_type_begin(PointeeType, Operands);
  Type *TargetType = nullptr;

  // A GEP with only the base operand folds away.
  if (Operands.empty())
    return TTI::TCC_Free;

  for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
    TargetType = GTI.getIndexedType();
    // Scalar GEPs with a constant index and vector GEPs with a splat
    // constant index are assumed to cost the same.
    const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
    if (!ConstIdx)
      if (auto *Splat = getSplatValue(*I))
        ConstIdx = dyn_cast<ConstantInt>(Splat);
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // For structures the index is always a splat or scalar constant.
      assert(ConstIdx && "Unexpected GEP index");
      uint64_t Field = ConstIdx->getZExtValue();
      BaseOffset +=
          DL.getStructLayout(STy)->getElementOffset(Field);
    } else {
      int64_t ElementSize =
          GTI.getSequentialElementStride(DL).getFixedValue();
      if (ConstIdx) {
        BaseOffset +=
            ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
      } else {
        // A variable index claims the (single) scale register.
        Scale = ElementSize;
      }
    }
  }

  // If no access type hint was provided, use the indexed target type.
  if (!AccessType)
    AccessType = TargetType;
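// Hedged summary: constant struct/array indices fold into BaseOffset and a
// single variable index claims Scale; the final verdict defers to
// isLegalAddressingMode(), making a target-foldable GEP TCC_Free and
// anything else TCC_Basic.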
// getPointersChainCost: cost each pointer in the chain.
for (const Value *V : Ptrs) {
  const auto *GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    continue;
  if (Info.isSameBase() && V != Base) {
    if (GEP->hasAllConstantIndices())
      continue;
    // A non-constant offset from the shared base costs an add:
    Cost += TargetTTI->getArithmeticInstrCost(
        Instruction::Add, GEP->getType(), CostKind,
        {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
        {});
  } else {
    SmallVector<const Value *> Indices(GEP->indices());
    Cost += TargetTTI->getGEPCost(GEP->getSourceElementType(),
                                  GEP->getPointerOperand(), Indices,
                                  AccessTy, CostKind);
  }
}
// getInstructionCost: classify U and forward to the derived target's hooks.
auto *TargetTTI = static_cast<const T *>(this);
// ...
if (const Function *F = CB->getCalledFunction()) {
  // A call that is not really lowered to a call stays cheap.
  if (!TargetTTI->isLoweredToCall(F))
    return TTI::TCC_Basic;
  // ...
}
Type *Ty = U->getType();
unsigned Opcode = Operator::getOpcode(U);
auto *I = dyn_cast<Instruction>(U);
switch (Opcode) {
default:
  break;
case Instruction::Call: {
  // ... (an IntrinsicCostAttributes CostAttrs is built from the call site)
  return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
}
case Instruction::Br:
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Switch:
  return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
case Instruction::Freeze:
  // Freeze lowers to a plain register use, so it is free.
  return TTI::TCC_Free;
case Instruction::ExtractValue:
case Instruction::InsertValue:
  return TargetTTI->getInsertExtractValueCost(Opcode, CostKind);
case Instruction::Alloca:
  if (cast<AllocaInst>(U)->isStaticAlloca())
    return TTI::TCC_Free;
  break;
case Instruction::GetElementPtr: {
  const auto *GEP = cast<GEPOperator>(U);
  Type *AccessType = nullptr;
  // Only provide the access type when the GEP has a single user.
  if (GEP->hasOneUser() && I)
    AccessType = I->user_back()->getAccessType();
  return TargetTTI->getGEPCost(GEP->getSourceElementType(),
                               Operands.front(), Operands.drop_front(),
                               AccessType, CostKind);
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::FNeg: {
  const TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(Operands[0]);
  // FNeg is unary; every other opcode also describes its second operand.
  TTI::OperandValueInfo Op2Info;
  if (Opcode != Instruction::FNeg)
    Op2Info = TTI::getOperandInfo(Operands[1]);
  return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info, Operands, I);
}
case Instruction::IntToPtr:
case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast:
case Instruction::FPExt:
case Instruction::SExt:
case Instruction::ZExt:
case Instruction::AddrSpaceCast: {
  Type *OpTy = Operands[0]->getType();
  return TargetTTI->getCastInstrCost(
      Opcode, Ty, OpTy, TTI::getCastContextHint(I), CostKind, I);
}
case Instruction::Store: {
  auto *SI = cast<StoreInst>(U);
  Type *ValTy = Operands[0]->getType();
  return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
                                    SI->getPointerAddressSpace(), CostKind,
                                    TTI::getOperandInfo(Operands[0]), I);
}
case Instruction::Load: {
  auto *LI = cast<LoadInst>(U);
  Type *LoadType = U->getType();
  // A load whose only use is a trunc is costed at the narrower type.
  if (I && I->hasOneUse())
    if (const auto *TI = dyn_cast<TruncInst>(*LI->user_begin()))
      LoadType = TI->getDestTy();
  return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
                                    LI->getPointerAddressSpace(), CostKind,
                                    {TTI::OK_AnyValue, TTI::OP_None}, I);
}
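// Hedged example: for "%w = load i64, ptr %p" whose only user is
// "%t = trunc i64 %w to i32", the load is costed as an i32 load, since the
// target can simply load the narrower value.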
case Instruction::Select: {
  const Value *Op0, *Op1;
  // Logical and/or forms (select x, y, false / select x, true, y) cost
  // like the corresponding And/Or.
  if (match(U, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
      match(U, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    return TargetTTI->getArithmeticInstrCost(
        match(U, m_LogicalAnd(m_Value(), m_Value())) ? Instruction::And
                                                     : Instruction::Or,
        U->getType(), CostKind, Op1Info, Op2Info, Operands, I);
  Type *CondTy = Operands[0]->getType();
  return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
                                       CmpInst::BAD_ICMP_PREDICATE,
                                       CostKind, Op1Info, Op2Info, I);
}
case Instruction::ICmp:
case Instruction::FCmp: {
  Type *ValTy = Operands[0]->getType();
  return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
                                       I ? cast<CmpInst>(I)->getPredicate()
                                         : CmpInst::BAD_ICMP_PREDICATE,
                                       CostKind, Op1Info, Op2Info, I);
}
case Instruction::InsertElement: {
  auto *IE = dyn_cast<InsertElementInst>(U);
  if (!IE)
    return TTI::TCC_Basic;
  unsigned Idx = -1U; // unknown lane unless a small constant index is seen
  if (auto *CI = dyn_cast<ConstantInt>(Operands[2]))
    if (CI->getValue().getActiveBits() <= 32)
      Idx = CI->getZExtValue();
  return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx);
}
case Instruction::ShuffleVector: {
  auto *Shuffle = dyn_cast<ShuffleVectorInst>(U);
  if (!Shuffle)
    return TTI::TCC_Basic;
  auto *VecTy = cast<VectorType>(U->getType());
  auto *VecSrcTy = cast<VectorType>(Operands[0]->getType());
  ArrayRef<int> Mask = Shuffle->getShuffleMask();
  int NumSubElts, SubIndex;

  // An all-poison mask folds away entirely.
  if (all_of(Mask, [](int M) { return M < 0; }))
    return TTI::TCC_Free;

  if (Shuffle->changesLength()) {
    // Treat a 'subvector widening' as a free shuffle.
    if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
      return TTI::TCC_Free;

    if (Shuffle->isExtractSubvectorMask(SubIndex))
      return TargetTTI->getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
                                       VecSrcTy, Mask, CostKind, SubIndex,
                                       VecTy, Operands, Shuffle);

    if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
      return TargetTTI->getShuffleCost(
          TTI::SK_InsertSubvector, VecTy, VecSrcTy, Mask, CostKind, SubIndex,
          FixedVectorType::get(VecTy->getScalarType(), NumSubElts), Operands,
          Shuffle);

    int ReplicationFactor, VF;
    if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
      APInt DemandedDstElts = APInt::getZero(Mask.size());
      for (auto I : enumerate(Mask)) {
        if (I.value() != PoisonMaskElem)
          DemandedDstElts.setBit(I.index());
      }
      return TargetTTI->getReplicationShuffleCost(
          VecSrcTy->getElementType(), ReplicationFactor, VF, DemandedDstElts,
          CostKind);
    }

    NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();
    SmallVector<int, 16> AdjustMask(Mask);

    // Widening shuffle: widen the source(s) for free, then shuffle at the
    // destination length.
    if (Shuffle->increasesLength()) {
      for (int &M : AdjustMask)
        M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;
      return TargetTTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy,
                                       VecTy, AdjustMask, CostKind, 0,
                                       nullptr, Operands, Shuffle);
    }

    // Narrowing shuffle: shuffle at the source width, then extract the low
    // subvector.
    InstructionCost ShuffleCost = TargetTTI->getShuffleCost(
        TTI::SK_PermuteTwoSrc, VecSrcTy, VecSrcTy, AdjustMask, CostKind, 0,
        nullptr, Operands, Shuffle);

    SmallVector<int, 16> ExtractMask(Mask.size());
    std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
    return ShuffleCost + TargetTTI->getShuffleCost(
                             TTI::SK_ExtractSubvector, VecTy, VecSrcTy,
                             ExtractMask, CostKind, 0, VecTy, {}, Shuffle);
  }

  if (Shuffle->isIdentity())
    return TTI::TCC_Free;

  if (Shuffle->isReverse())
    return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, VecSrcTy, Mask,
                                     CostKind, 0, nullptr, Operands, Shuffle);

  if (Shuffle->isTranspose())
    return TargetTTI->getShuffleCost(TTI::SK_Transpose, VecTy, VecSrcTy,
                                     Mask, CostKind, 0, nullptr, Operands,
                                     Shuffle);

  if (Shuffle->isZeroEltSplat())
    return TargetTTI->getShuffleCost(TTI::SK_Broadcast, VecTy, VecSrcTy,
                                     Mask, CostKind, 0, nullptr, Operands,
                                     Shuffle);

  if (Shuffle->isSingleSource())
    return TargetTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, VecTy,
                                     VecSrcTy, Mask, CostKind, 0, nullptr,
                                     Operands, Shuffle);

  if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
    return TargetTTI->getShuffleCost(
        TTI::SK_InsertSubvector, VecTy, VecSrcTy, Mask, CostKind, SubIndex,
        FixedVectorType::get(VecTy->getScalarType(), NumSubElts), Operands,
        Shuffle);

  if (Shuffle->isSelect())
    return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, VecSrcTy, Mask,
                                     CostKind, 0, nullptr, Operands, Shuffle);

  if (Shuffle->isSplice(SubIndex))
    return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, VecSrcTy, Mask,
                                     CostKind, SubIndex, nullptr, Operands,
                                     Shuffle);

  return TargetTTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, VecSrcTy,
                                   Mask, CostKind, 0, nullptr, Operands,
                                   Shuffle);
}
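// Hedged summary: masks are classified from cheapest to most generic
// (identity is free, then reverse, transpose, broadcast, single-source
// permute, subvector insert, select, splice); only an unclassified mask
// pays the generic two-source permute cost.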
case Instruction::ExtractElement: {
  auto *EEI = dyn_cast<ExtractElementInst>(U);
  if (!EEI)
    return TTI::TCC_Basic;
  unsigned Idx = -1U;
  if (auto *CI = dyn_cast<ConstantInt>(Operands[1]))
    if (CI->getValue().getActiveBits() <= 32)
      Idx = CI->getZExtValue();
  Type *DstTy = Operands[0]->getType();
  return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
}
// ... (a second CRTP helper dispatches through the derived type the same way)
auto *TargetTTI = static_cast<const T *>(this);
// ...

#endif // LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H