#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
if (!isa<Constant>(Operand))
return SI.getNumCases();
std::pair<const Value *, unsigned>
getPredicatedAddrSpace(const Value *V) const {
  return std::make_pair(nullptr, -1);
}
assert(F && "A concrete function must be provided to this routine.");
// Intrinsics are expected to be lowered inline.
if (F->isIntrinsic())
  return false;

if (F->hasLocalLinkage() || !F->hasName())
  return true;

StringRef Name = F->getName();
// These will all likely lower to a single selection DAG node.
if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
    Name == "atan2" || Name == "atan2f" || Name == "atan2l" ||
    Name == "exp10" || Name == "exp10l" || Name == "exp10f")
  return false;
// These are all likely to be optimized into something smaller.
if (Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
    Name == "floorf" || Name == "ceil" || Name == "round" ||
    Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
    Name == "llabs")
  return false;

return true;
std::optional<Value *>
simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                 APInt DemandedMask, KnownBits &Known,
                                 bool &KnownBitsComputed) const {
  return std::nullopt;
}

std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return std::nullopt;
}
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                           Instruction *I = nullptr,
                           int64_t ScalableOffset = 0) const {
  // Guess that only reg and reg+reg addressing is allowed. This heuristic is
  // taken from the implementation of LSR.
  return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
}
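// Illustrative sketch (an assumption, not from this header): a target whose
// loads/stores support only reg+imm12 addressing might override the hook as
// below; "MyTargetTTIImpl" is a hypothetical class name.
//
// \code
//   bool MyTargetTTIImpl::isLegalAddressingMode(
//       Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
//       int64_t Scale, unsigned AddrSpace, Instruction *I,
//       int64_t ScalableOffset) const {
//     return !BaseGV && Scale == 0 && isInt<12>(BaseOffset);
//   }
// \endcode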
Align Alignment) const {
Align Alignment, unsigned AddrSpace) {
                                     unsigned AddrSpace) const {
  // Guess that all legal addressing modes are free.
  if (isLegalAddressingMode(Ty, BaseGV, BaseOffset.getFixed(), HasBaseReg,
                            Scale, AddrSpace, /*I=*/nullptr,
                            BaseOffset.getScalable()))
    return 0;
  return InstructionCost::getInvalid();
}
bool useAA() const { return false; }
unsigned ScalarOpdIdx) const {
                                         const APInt &DemandedElts,
                                         bool Insert, bool Extract,
bool IsZeroCmp) const {
return isa<SelectInst>(I) &&
unsigned *Fast) const {
516 return "Generic::Unknown Register Class";
518 return "Generic::ScalarRC";
520 return "Generic::VectorRC";
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { return 0; }
bool shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  AllowPromotionWithoutCommonHeader = false;
  return false;
}
std::optional<unsigned>
getCacheSize(TargetTransformInfo::CacheLevel Level) const {
  // ...
}

std::optional<unsigned>
getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
  // ...
}
unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                              unsigned NumStridedMemAccesses,
                              unsigned NumPrefetches, bool HasCall) const {
  return 1;
}
std::optional<unsigned> BinOp = std::nullopt) const {
auto IsWidenableCondition = [](const Value *V) {
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
      return true;
  return false;
};
case Instruction::FDiv:
case Instruction::FRem:
case Instruction::SDiv:
case Instruction::SRem:
case Instruction::UDiv:
case Instruction::URem:
  // FIXME: Unlikely to be true for CodeSize.
  return TTI::TCC_Expensive;
case Instruction::And:
case Instruction::Or:
  if (any_of(Args, IsWidenableCondition))
    return TTI::TCC_Free;
  break;
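// Illustrative note (added): the And/Or special case above means a guard
// condition such as
//   %wide = call i1 @llvm.experimental.widenable.condition()
//   %guard = and i1 %cond, %wide
// is modeled as TCC_Free, so widenable guards do not distort cost-based
// decisions before they are lowered away.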
case Instruction::IntToPtr: {
  unsigned SrcSize = Src->getScalarSizeInBits();
  if (DL.isLegalInteger(SrcSize) &&
      SrcSize <= DL.getPointerTypeSizeInBits(Dst))
    return 0;
  break;
}
case Instruction::PtrToInt: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  if (DL.isLegalInteger(DstSize) &&
      DstSize >= DL.getPointerTypeSizeInBits(Src))
    return 0;
  break;
}
case Instruction::BitCast:
  // Identity and pointer-to-pointer casts are free.
  if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
    return 0;
  break;
case Instruction::Trunc: {
unsigned Index) const {
ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
unsigned Index) const {
const APInt &DemandedDstElts,
const Value *Ptr, bool VariableMask,
const Value *Ptr, bool VariableMask,
bool UseMaskForCond, bool UseMaskForGaps) const {
switch (ICA.getID()) {
default:
  break;
case Intrinsic::experimental_vector_histogram_add:
  // For now, we want explicit support from the target for histograms.
  return InstructionCost::getInvalid();
case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
case Intrinsic::annotation:
case Intrinsic::assume:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::dbg_assign:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::dbg_label:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::is_constant:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::objectsize:
case Intrinsic::ptr_annotation:
case Intrinsic::var_annotation:
case Intrinsic::experimental_gc_result:
case Intrinsic::experimental_gc_relocate:
case Intrinsic::coro_alloc:
case Intrinsic::coro_begin:
case Intrinsic::coro_begin_custom_abi:
case Intrinsic::coro_free:
case Intrinsic::coro_end:
case Intrinsic::coro_frame:
case Intrinsic::coro_size:
case Intrinsic::coro_align:
case Intrinsic::coro_suspend:
case Intrinsic::coro_subfn_addr:
case Intrinsic::threadlocal_address:
case Intrinsic::experimental_widenable_condition:
case Intrinsic::ssa_copy:
  // These intrinsics don't actually represent code after lowering.
  return 0;
}
const SCEV *) const {
std::optional<FastMathFlags> FMF,
Type *ExpectedType) const {
                                unsigned SrcAddrSpace, unsigned DestAddrSpace,
                                std::optional<uint32_t> AtomicElementSize) const {
  return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
                           : Type::getInt8Ty(Context);
}
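// Worked example (added for clarity): with AtomicElementSize == 4 the copy
// loop operates on i32 (Type::getIntNTy(Context, 32)); with std::nullopt it
// falls back to i8, i.e. byte-wise copying.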
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    std::optional<uint32_t> AtomicCpySize) const {
  unsigned OpSizeInBytes = AtomicCpySize.value_or(1);
  Type *OpType = Type::getIntNTy(Context, OpSizeInBytes * 8);
  for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
    OpsOut.push_back(OpType);
}
return (Caller->getFnAttribute("target-cpu") ==
        Callee->getFnAttribute("target-cpu")) &&
       (Caller->getFnAttribute("target-features") ==
        Callee->getFnAttribute("target-features"));
                              unsigned DefaultCallPenalty) const {
  return DefaultCallPenalty;
}
return (Caller->getFnAttribute("target-cpu") ==
        Callee->getFnAttribute("target-cpu")) &&
       (Caller->getFnAttribute("target-features") ==
        Callee->getFnAttribute("target-features"));
unsigned AddrSpace) const {
unsigned AddrSpace) const {
unsigned ChainSizeInBytes,
unsigned ChainSizeInBytes,
Align Alignment) const {
if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
  const auto *VectorValue = cast<Constant>(Val);

  // For a vector, pick the max between the min required size for each
  // element.
  auto *VT = cast<FixedVectorType>(Val->getType());

  // Assume unsigned elements.
  isSigned = false;

  // The max required size is the size of the vector element type.
  unsigned MaxRequiredSize =
      VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();

  unsigned MinRequiredSize = 0;
  for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
    if (auto *IntElement =
            dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
      bool signedElement = IntElement->getValue().isNegative();
      // Get the element min required size.
      unsigned ElementMinRequiredSize =
          IntElement->getValue().getSignificantBits() - 1;
      // If one element is signed then the whole vector is signed.
      isSigned |= signedElement;
      // Save the max required bit size between all the elements.
      MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
    } else {
      // Not an int constant element.
      return MaxRequiredSize;
    }
  }
  return MinRequiredSize;
}

if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
  isSigned = CI->getValue().isNegative();
  return CI->getValue().getSignificantBits() - 1;
}

if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
  isSigned = true;
  return Cast->getSrcTy()->getScalarSizeInBits() - 1;
}

if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
  isSigned = false;
  return Cast->getSrcTy()->getScalarSizeInBits();
}
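// Worked examples (added for clarity):
//   ConstantInt 100 (i32): getSignificantBits() == 8 -> 7 bits, isSigned=false
//   ConstantInt -4 (i32):  getSignificantBits() == 3 -> 2 bits, isSigned=true
//   zext i8 %x to i32:     8 bits, isSigned=false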
return Ptr && isa<SCEVAddRecExpr>(Ptr);
int64_t MergeDistance) const {
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
bool HasBaseReg = (BaseGV == nullptr);

auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
APInt BaseOffset(PtrSizeBits, 0);
int64_t Scale = 0;

auto GTI = gep_type_begin(PointeeType, Operands);
Type *TargetType = nullptr;

for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
  TargetType = GTI.getIndexedType();
  // Assume that the cost of a scalar GEP with constant index and of a
  // vector GEP with splat constant index are the same.
  const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
  if (!ConstIdx)
    if (auto Splat = getSplatValue(*I))
      ConstIdx = dyn_cast<ConstantInt>(Splat);
  if (StructType *STy = GTI.getStructTypeOrNull()) {
    // For structures the index is always a splat or scalar constant.
    assert(ConstIdx && "Unexpected GEP index");
    uint64_t Field = ConstIdx->getZExtValue();
    BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
  } else {
    // If this operand is scalable, bail out early.
    if (TargetType->isScalableTy())
      return TTI::TCC_Basic;
    int64_t ElementSize =
        GTI.getSequentialElementStride(DL).getFixedValue();
    if (ConstIdx) {
      BaseOffset +=
          ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
    } else {
      // Needs scale register; no addressing mode takes two of them.
      if (Scale != 0)
        return TTI::TCC_Basic;
      Scale = ElementSize;
    }
  }
}

// If we haven't been provided a hint, use the target type for now.
if (!AccessType)
  AccessType = TargetType;

// If the final address is a legal addressing mode for the given access
// type, the GEP folds into the instruction accessing it.
if (static_cast<T *>(this)->isLegalAddressingMode(
        AccessType, const_cast<GlobalValue *>(BaseGV),
        BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale,
        Ptr->getType()->getPointerAddressSpace()))
  return TTI::TCC_Free;
return TTI::TCC_Basic;
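// Worked example (added for clarity): for
//   %p = getelementptr inbounds {i32, i32}, ptr %base, i64 0, i32 1
// the struct branch accumulates BaseOffset = 4 and Scale stays 0, so the GEP
// is TCC_Free exactly when isLegalAddressingMode() accepts base+4 for the
// access type.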
for (const Value *V : Ptrs) {
  const auto *GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    continue;
  if (Info.isSameBase() && V != Base) {
    if (GEP->hasAllConstantIndices())
      continue;
    // ...
  } else {
    SmallVector<const Value *> Indices(GEP->indices());
    Cost += static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                               GEP->getPointerOperand(),
                                               Indices, AccessTy, CostKind);
  }
}
auto *TargetTTI = static_cast<T *>(this);
// Handle non-intrinsic calls, invokes, and callbr.
auto *CB = dyn_cast<CallBase>(U);
if (CB && !isa<IntrinsicInst>(U)) {
  if (const Function *F = CB->getCalledFunction()) {
    if (!TargetTTI->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered later.
    // ...
Type *Ty = U->getType();
auto *I = dyn_cast<Instruction>(U);
case Instruction::Call: {
  assert(isa<IntrinsicInst>(U) && "Unexpected non-intrinsic call");
  auto *Intrinsic = cast<IntrinsicInst>(U);
  IntrinsicCostAttributes CostAttrs(Intrinsic->getIntrinsicID(), *CB);
  return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
}
case Instruction::Br:
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Switch:
  return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
case Instruction::ExtractValue:
case Instruction::Freeze:
  return TTI::TCC_Free; // Model all ExtractValue nodes as free.
case Instruction::Alloca:
  if (cast<AllocaInst>(U)->isStaticAlloca())
    return TTI::TCC_Free;
  break;
case Instruction::GetElementPtr: {
  const auto *GEP = cast<GEPOperator>(U);
  Type *AccessType = nullptr;
  // For now, only provide the AccessType in the simple case where the GEP
  // only has one user.
  if (GEP->hasOneUser() && I)
    AccessType = I->user_back()->getAccessType();

  return TargetTTI->getGEPCost(GEP->getSourceElementType(), Operands.front(),
                               Operands.drop_front(), AccessType, CostKind);
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::FNeg: {
  const TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(Operands[0]);
  TTI::OperandValueInfo Op2Info;
  if (Opcode != Instruction::FNeg)
    Op2Info = TTI::getOperandInfo(Operands[1]);
  return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info, Operands, I);
}
case Instruction::IntToPtr:
case Instruction::PtrToInt:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast:
case Instruction::FPExt:
case Instruction::SExt:
case Instruction::ZExt:
case Instruction::AddrSpaceCast: {
  Type *OpTy = Operands[0]->getType();
  return TargetTTI->getCastInstrCost(
      Opcode, Ty, OpTy, TTI::getCastContextHint(I), CostKind, I);
}
case Instruction::Store: {
  auto *SI = cast<StoreInst>(U);
  Type *ValTy = Operands[0]->getType();
  TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(Operands[0]);
  return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
                                    SI->getPointerAddressSpace(), CostKind,
                                    OpInfo, I);
}
case Instruction::Load: {
  auto *LI = cast<LoadInst>(U);
  Type *LoadType = U->getType();
  // If the only use of a non-register-sized load is a trunc to a register
  // sized type, the selector can fold the pair into a single narrower load.
  // ...
  if (const TruncInst *TI = dyn_cast<TruncInst>(*LI->user_begin()))
    LoadType = TI->getDestTy();
  return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
                                    LI->getPointerAddressSpace(), CostKind,
                                    {TTI::OK_AnyValue, TTI::OP_None}, I);
}
case Instruction::Select: {
  const Value *Op0, *Op1;
  // Logical and/or selects are costed as the corresponding And/Or.
  if (match(U, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
      match(U, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    return TargetTTI->getArithmeticInstrCost(
        match(U, m_LogicalOr()) ? Instruction::Or : Instruction::And, Ty,
        CostKind, TTI::getOperandInfo(Operands[1]),
        TTI::getOperandInfo(Operands[2]), Operands, I);
  Type *CondTy = Operands[0]->getType();
  return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
                                       CmpInst::BAD_ICMP_PREDICATE, CostKind,
                                       {TTI::OK_AnyValue, TTI::OP_None},
                                       {TTI::OK_AnyValue, TTI::OP_None}, I);
}
case Instruction::ICmp:
case Instruction::FCmp: {
  Type *ValTy = Operands[0]->getType();
  return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
                                       I ? cast<CmpInst>(I)->getPredicate()
                                         : CmpInst::BAD_ICMP_PREDICATE,
                                       CostKind,
                                       {TTI::OK_AnyValue, TTI::OP_None},
                                       {TTI::OK_AnyValue, TTI::OP_None}, I);
}
case Instruction::InsertElement: {
  auto *IE = dyn_cast<InsertElementInst>(U);
  if (!IE)
    return TTI::TCC_Basic; // FIXME
  unsigned Idx = -1;
  if (auto *CI = dyn_cast<ConstantInt>(Operands[2]))
    if (CI->getValue().getActiveBits() <= 32)
      Idx = CI->getZExtValue();
  return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx);
}
case Instruction::ShuffleVector: {
  auto *Shuffle = dyn_cast<ShuffleVectorInst>(U);
  if (!Shuffle)
    return TTI::TCC_Basic; // FIXME
  auto *VecTy = cast<VectorType>(U->getType());
  auto *VecSrcTy = cast<VectorType>(Operands[0]->getType());
  ArrayRef<int> Mask = Shuffle->getShuffleMask();
  int NumSubElts, SubIndex;

  if (Shuffle->changesLength()) {
    // Treat a 'subvector widening' as a free shuffle.
    if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
      return 0;
    if (Shuffle->isExtractSubvectorMask(SubIndex))
      return TargetTTI->getShuffleCost(TTI::SK_ExtractSubvector, VecSrcTy,
                                       Mask, CostKind, SubIndex, VecTy,
                                       Operands, Shuffle);
    if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
      return TargetTTI->getShuffleCost(
          TTI::SK_InsertSubvector, VecTy, Mask, CostKind, SubIndex,
          FixedVectorType::get(VecTy->getScalarType(), NumSubElts),
          Operands, Shuffle);

    int ReplicationFactor, VF;
    if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
      APInt DemandedDstElts = APInt::getZero(Mask.size());
      for (auto I : enumerate(Mask))
        if (I.value() != PoisonMaskElem)
          DemandedDstElts.setBit(I.index());
      return TargetTTI->getReplicationShuffleCost(
          VecSrcTy->getElementType(), ReplicationFactor, VF,
          DemandedDstElts, CostKind);
    }

    bool IsUnary = isa<UndefValue>(Operands[1]);
    NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();
    SmallVector<int, 16> AdjustMask(Mask);

    // Widening shuffle - widen the source(s) to the new length (treated as
    // free, see above), then perform the adjusted shuffle at that width.
    if (Shuffle->increasesLength()) {
      for (int &M : AdjustMask)
        M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;
      return TargetTTI->getShuffleCost(
          IsUnary ? TTI::SK_PermuteSingleSrc : TTI::SK_PermuteTwoSrc, VecTy,
          AdjustMask, CostKind, 0, nullptr, Operands, Shuffle);
    }

    // Narrowing shuffle - shuffle at the original width, then extract the
    // lower elements.
    AdjustMask.append(NumSubElts - Mask.size(), PoisonMaskElem);
    InstructionCost ShuffleCost = TargetTTI->getShuffleCost(
        IsUnary ? TTI::SK_PermuteSingleSrc : TTI::SK_PermuteTwoSrc,
        VecSrcTy, AdjustMask, CostKind, 0, nullptr);
    SmallVector<int, 16> ExtractMask(Mask.size());
    std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
    return ShuffleCost + TargetTTI->getShuffleCost(TTI::SK_ExtractSubvector,
                                                   VecSrcTy, ExtractMask,
                                                   CostKind, 0, VecTy, {},
                                                   Shuffle);
  }

  if (Shuffle->isIdentity())
    return 0;
  if (Shuffle->isReverse())
    return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, Mask, CostKind,
                                     0, nullptr, Operands, Shuffle);
  if (Shuffle->isSelect())
    return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, Mask, CostKind,
                                     0, nullptr, Operands, Shuffle);
  if (Shuffle->isTranspose())
    return TargetTTI->getShuffleCost(TTI::SK_Transpose, VecTy, Mask, CostKind,
                                     0, nullptr, Operands, Shuffle);
  if (Shuffle->isZeroEltSplat())
    return TargetTTI->getShuffleCost(TTI::SK_Broadcast, VecTy, Mask, CostKind,
                                     0, nullptr, Operands, Shuffle);
  if (Shuffle->isSingleSource())
    return TargetTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, VecTy, Mask,
                                     CostKind, 0, nullptr, Operands, Shuffle);
  if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
    return TargetTTI->getShuffleCost(
        TTI::SK_InsertSubvector, VecTy, Mask, CostKind, SubIndex,
        FixedVectorType::get(VecTy->getScalarType(), NumSubElts), Operands,
        Shuffle);
  if (Shuffle->isSplice(SubIndex))
    return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, Mask, CostKind,
                                     SubIndex, nullptr, Operands, Shuffle);

  return TargetTTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, Mask,
                                   CostKind, 0, nullptr, Operands, Shuffle);
}
case Instruction::ExtractElement: {
  auto *EEI = dyn_cast<ExtractElementInst>(U);
  if (!EEI)
    return TTI::TCC_Basic; // FIXME
  unsigned Idx = -1;
  if (auto *CI = dyn_cast<ConstantInt>(Operands[1]))
    if (CI->getValue().getActiveBits() <= 32)
      Idx = CI->getZExtValue();
  Type *DstTy = Operands[0]->getType();
  return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
}
auto *TargetTTI = static_cast<T *>(this);
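// Illustrative sketch (an assumption, not from this header): concrete targets
// normally reach this CRTP base through BasicTTIImplBase; "MyTargetTTIImpl"
// is a hypothetical name. Overriding a hook in the derived class is enough
// for the static_cast<T *>(this) dispatch above to pick it up:
//
// \code
//   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
//   public:
//     bool useAA() const { return true; } // Replaces the default above.
//   };
// \endcode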