// Fragmentary listing of llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
// (PPCTTIImpl); elided runs of the original file are marked with "// ...".

#include "llvm/IR/IntrinsicsPowerPC.h"
// ...

#define DEBUG_TYPE "ppctti"

// ... (the option names of these cl::opt declarations are elided; only the
// cl::desc strings survive)
    cl::desc("Enable using coldcc calling conv for cold "
             "internal functions"));
// ...
    cl::desc("Do not add instruction count to lsr cost model"));
// ...
    cl::desc("Loops with a constant trip count smaller than "
             "this value will not use the count register."));
std::optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  // ...
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn lvx -> load if the pointer is known aligned.
    // ...
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn the VSX load intrinsics into normal loads.
    // ...
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // ...
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // ...
  case Intrinsic::ppc_altivec_vperm:
    // A vperm whose mask is entirely constant can be folded at compile time.
    // ...
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the mask elements are constant integers or undef.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          // ...

      Value *ExtractedElts[32];
      memset(ExtractedElts, 0, sizeof(ExtractedElts));

      for (unsigned i = 0; i != 16; ++i) {
        if (isa<UndefValue>(Mask->getAggregateElement(i)))
          continue;
        unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
        // ...
        if (!ExtractedElts[Idx]) {
          // ...
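// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++, not LLVM code): the byte permutation that
// the constant-mask vperm fold above evaluates at compile time, written for
// big-endian element numbering. Only the low five bits of each mask byte
// select a byte of the 32-byte concatenation of the two sources; the elided
// original code additionally mirrors indices (and swaps the operands) on
// little-endian targets.
#include <array>
#include <cstdint>

static std::array<uint8_t, 16>
permuteBytesBE(const std::array<uint8_t, 16> &V1,
               const std::array<uint8_t, 16> &V2,
               const std::array<uint8_t, 16> &Mask) {
  std::array<uint8_t, 32> Cat; // vperm selects from the concatenation V1:V2
  for (unsigned i = 0; i != 16; ++i) {
    Cat[i] = V1[i];
    Cat[16 + i] = V2[i];
  }
  std::array<uint8_t, 16> Out;
  for (unsigned i = 0; i != 16; ++i)
    Out[i] = Cat[Mask[i] & 31]; // wrap-around, like the hardware
  return Out;
}
// ---------------------------------------------------------------------------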
// In PPCTTIImpl::getIntImmCost:
  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      // ...
    if (isInt<32>(Imm.getSExtValue())) {
      // A constant whose low 16 bits are zero can be materialized with lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        // ...
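// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++): the tiers checked above correspond to how
// a PPC immediate is materialized. The instruction counts are illustrative
// assumptions, not values taken from this file.
#include <cstdint>

static unsigned ppcImmMaterializationInsns(int64_t Imm) {
  if (Imm >= -32768 && Imm <= 32767)
    return 1; // li: one sign-extended 16-bit immediate
  if (Imm >= INT32_MIN && Imm <= INT32_MAX) {
    if ((static_cast<uint64_t>(Imm) & 0xFFFF) == 0)
      return 1; // lis: shifted 16-bit immediate, low half zero
    return 2;   // lis + ori
  }
  return 5;     // assumed worst case for a full 64-bit constant
}
// ---------------------------------------------------------------------------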
// In PPCTTIImpl::getIntImmCostIntrin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      // ...
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      // ...
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      // ...
// In PPCTTIImpl::getIntImmCostInst:
  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  // ...
  case Instruction::GetElementPtr:
    // ...
  case Instruction::And:
    // ...
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    // ...
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // ...
  case Instruction::ICmp:
    // ...
  case Instruction::Select:
    // ...
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    // ...

  if (ZeroFree && Imm == 0)
    // ...
  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      // ...
    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(uint32_t(Imm.getZExtValue())) ||
           isShiftedMask_32(uint32_t(~Imm.getZExtValue()))))
        // ...
      // ... (64-bit shifted-mask check elided)
    }
    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      // ...
    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      // ...
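// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++): an equivalent of the isShiftedMask_32
// predicate used by the "RunFree" path above. A contiguous run of ones is
// free because PPC rotate-and-mask instructions (rlwinm and friends) encode
// such masks directly.
#include <cstdint>

static bool isShiftedMask32(uint32_t V) {
  if (V == 0)
    return false;
  uint32_t Filled = V | (V - 1);       // extend the run down to bit 0
  return (Filled & (Filled + 1)) == 0; // now it must be a low mask
}
// ---------------------------------------------------------------------------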
// In PPCTTIImpl::getInstructionCost:
  // Cast, load, and store costs already apply the vector adjustment in their
  // own hooks; defer those to the base implementation.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    // ...
  if (U->getType()->isVectorTy()) {
    // ...
// In PPCTTIImpl::isHardwareLoopProfitable:
  Metrics.analyzeBasicBlock(BB, *this, EphValues);
  // ...
  // Bail out if the loop body already contains hardware-loop intrinsics
  // (e.g. from an already-converted inner loop).
  for (auto *BB : L->getBlocks())
    for (auto &I : *BB)
      if (auto *Call = dyn_cast<IntrinsicInst>(&I))
        if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations ||
            Call->getIntrinsicID() == Intrinsic::loop_decrement)
          // ...

  L->getExitingBlocks(ExitingBlocks);
  // ...
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    // ...
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !extractBranchWeights(*BI, TrueWeight, FalseWeight))
        // ...

      // If the exit edge is hotter than the edge that stays in the loop,
      // a CTR-based hardware loop is not worthwhile.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        // ...
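// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++): a restatement of the weight test above.
// The elided surrounding code rejects the hardware loop when any exiting
// branch leaves the loop more often than it stays in it.
#include <cstdint>

static bool exitIsHotterThanLoop(uint64_t TrueWeight, uint64_t FalseWeight,
                                 bool TrueIsExit) {
  uint64_t ExitWeight = TrueIsExit ? TrueWeight : FalseWeight;
  uint64_t LoopWeight = TrueIsExit ? FalseWeight : TrueWeight;
  return ExitWeight > LoopWeight;
}
// ---------------------------------------------------------------------------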
// In PPCTTIImpl::enableAggressiveInterleaving:
  return LoopHasReductions;

// In PPCTTIImpl::enableMemCmpExpansion:
  Options.LoadSizes = {8, 4, 2, 1};
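// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++): what the LoadSizes list above drives. The
// memcmp expansion covers a known compare size with the largest permitted
// loads, e.g. 15 bytes -> 8 + 4 + 2 + 1 on PPC64. (The real expansion may
// also use overlapping loads; this greedy decomposition is an illustration.)
#include <vector>

static std::vector<unsigned> expandMemcmpLoads(unsigned Size) {
  static const unsigned LoadSizes[] = {8, 4, 2, 1};
  std::vector<unsigned> Seq;
  for (unsigned LS : LoadSizes)
    while (Size >= LS) {
      Seq.push_back(LS);
      Size -= LS;
    }
  return Seq;
}
// ---------------------------------------------------------------------------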
// In PPCTTIImpl::getNumberOfRegisters:
  return ClassID == VSXRC ? 64 : 32;

// In PPCTTIImpl::getRegisterClassName:
  // ...
    return "PPC::unknown register class";
  case GPRRC:
    return "PPC::GPRRC";
  case FPRRC:
    return "PPC::FPRRC";
  case VRRC:
    return "PPC::VRRC";
  case VSXRC:
    return "PPC::VSXRC";
// In PPCTTIImpl::vectorCostAdjustmentFactor:
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    // ...
  if (LT1.first != 1 || !LT1.second.isVector())
    // ...
  if (LT2.first != 1 || !LT2.second.isVector())
    // ...

// In PPCTTIImpl::getArithmeticInstrCost:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);
  // ...
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info);
  return Cost * CostFactor;

// In PPCTTIImpl::getShuffleCost:
  return LT.first * CostFactor;

// In PPCTTIImpl::getCFInstrCost:
  return Opcode == Instruction::PHI ? 0 : 1;

// In PPCTTIImpl::getCastInstrCost:
  return Cost == 0 ? 0 : 1;

// In PPCTTIImpl::getCmpSelInstrCost:
  return Cost * CostFactor;
// In PPCTTIImpl::getVectorInstrCost:
  assert(ISD && "Invalid opcode");
  // ...
  unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0;
  // ...
  unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1;
  if (ST->hasP9Altivec()) {
    // ...
    if (ST->hasP10Vector())
      return CostFactor + MaskCostForIdx;
    else if (Index != -1U)
      return 2 * CostFactor;
    // ...
    if (EltSize == 64 && Index != -1U)
      // ...
    else if (EltSize == 32) {
      // ...
      if (Index == MfvsrwzIndex)
        // ...
      return CostFactor + MaskCostForIdx;
    }
    // ...
    return CostFactor + MaskCostForOneBitSize + MaskCostForIdx;
  } else if (ST->hasDirectMove() && Index != -1U) {
    // ...
    return 3 + MaskCostForOneBitSize;
  }
  // ...
  unsigned LHSPenalty = 2;
  // ...
  return LHSPenalty + Cost;
// In PPCTTIImpl::getMemoryOpCost:
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         /* ... */);
  // ...
  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  // ...
  // VSX has cheap 32b/64b loads to/from vector registers.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    // ...
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    // ...
  // Pre-P8 Altivec loads can tolerate misalignment via the permutation-based
  // (lvsl/vperm) load sequence, as long as each scalar piece is aligned.
  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.
  // ...
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    // ...
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);
  // ...
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      // ... (scalarized store: one extract per element)
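// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++): the arithmetic behind the unaligned
// penalty above. An access of SrcBytes at a smaller alignment is priced as if
// split into SrcBytes / Alignment naturally aligned pieces, so each of the
// LT.first legalized values pays (pieces - 1) extra accesses.
#include <cstdint>

static uint64_t unalignedAccessCost(uint64_t BaseCost, uint64_t NumLegalized,
                                    uint64_t SrcBytes, uint64_t Alignment) {
  uint64_t Pieces = SrcBytes / Alignment; // e.g. 16-byte vector @ align 4 -> 4
  return BaseCost + NumLegalized * (Pieces - 1);
}
// ---------------------------------------------------------------------------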
InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  // ...
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");
  // ...
  // Plus the cost of the interleave/deinterleave permutations themselves.
  Cost += Factor * (LT.first - 1);
// In PPCTTIImpl::shouldBuildRelLookupTables:
  if (!TM.isELFv2ABI())
    // ...
// In PPCTTIImpl::getTgtMemIntrinsic:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    // ...
    Info.WriteMem = false;
    // ...
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    // ...
    Info.ReadMem = false;
    Info.WriteMem = true;
    // ...
  }
  case Intrinsic::ppc_stbcx:
  case Intrinsic::ppc_sthcx:
  case Intrinsic::ppc_stdcx:
  case Intrinsic::ppc_stwcx: {
    // ...
    Info.ReadMem = false;
    Info.WriteMem = true;
    // ...
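// ---------------------------------------------------------------------------
// Editor's sketch: the shape each case above follows once the elided lines
// are restored. The Info.PtrVal assignment is an assumption based on the
// load intrinsics taking their pointer as argument 0 (the store forms pass
// it as argument 1); the field names are from llvm::MemIntrinsicInfo.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"

static void describeLoadIntrinsic(llvm::IntrinsicInst *Inst,
                                  llvm::MemIntrinsicInfo &Info) {
  Info.PtrVal = Inst->getArgOperand(0); // assumed pointer operand (load form)
  Info.ReadMem = true;
  Info.WriteMem = false;
}
// ---------------------------------------------------------------------------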
bool PPCTTIImpl::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                       Align Alignment) const {
  // Only loads and stores have a vector-length form on Power.
  if (Opcode != Instruction::Load && Opcode != Instruction::Store)
    // ...
  if ((!ST->hasP9Vector() && !ST->hasP10Vector()) || !ST->isPPC64())
    // ...
  if (isa<FixedVectorType>(DataType)) {
    // ...
    return VecWidth == 128;
  }
  // ...
  return IntWidth == 8 || IntWidth == 16 || IntWidth == 32 || IntWidth == 64;
// In PPCTTIImpl::getVPMemoryOpCost:
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         /* ... */);
  // ...
  auto *SrcVTy = dyn_cast<FixedVectorType>(Src);
  assert(SrcVTy && "Expected a vector type for VP memory operations");
  // ...
  const Align DesiredAlignment(16);
  // ...
  // Blend the normal cost with the estimated cost of a P9 pipeline flush,
  // weighted by how far the alignment is from the desired 16 bytes.
  float AlignmentProb = ((float)Alignment.value()) / DesiredAlignment.value();
  float MisalignmentProb = 1.0 - AlignmentProb;
  return (MisalignmentProb * P9PipelineFlushEstimate) +
         (AlignmentProb * *Cost.getValue());
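// ---------------------------------------------------------------------------
// Editor's sketch (standalone C++): the cost blend computed above. The
// expected cost is the pipeline-flush estimate weighted by the chance of
// taking the misaligned path, plus the base cost weighted by the aligned
// chance; P9PipelineFlushEstimate is a constant defined earlier in the file
// (elided from this listing).
static float blendedVPMemCost(float AlignmentBytes, float DesiredAlignBytes,
                              float BaseCost, float FlushEstimate) {
  float AlignmentProb = AlignmentBytes / DesiredAlignBytes;
  float MisalignmentProb = 1.0f - AlignmentProb;
  return MisalignmentProb * FlushEstimate + AlignmentProb * BaseCost;
}
// ---------------------------------------------------------------------------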