#include "llvm/IR/IntrinsicsPowerPC.h"
#define DEBUG_TYPE "ppctti"
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));
               cl::desc("Do not add instruction count to lsr cost model"));
    cl::desc("Loops with a constant trip count smaller than "
             "this value will not use the count register."));
std::optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
  case Intrinsic::ppc_altivec_vperm:
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));
        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          if (!ExtractedElts[Idx]) {
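
// getIntImmCost: an immediate that fits in a signed 16-bit field, or a 32-bit
// value whose low halfword is zero (materializable with a single lis), costs
// one instruction.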
  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    if (isInt<32>(Imm.getSExtValue())) {
      // Materializable with a single lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;
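
// getIntImmCostIntrin: immediates that the lowering folds into the intrinsic
// itself (overflow-checked arithmetic, stackmap, patchpoint) are free.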
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
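
// getIntImmCostInst: per-opcode rules for which operand position (ImmIdx) can
// encode an immediate directly, and whether shifted (ShiftedFree), run-of-ones
// (RunFree), unsigned (UnsignedFree) or zero (ZeroFree) values are also free.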
  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  case Instruction::GetElementPtr:
  case Instruction::And:
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;
  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    if (RunFree && Imm.getBitWidth() <= 32 &&
        (isShiftedMask_32(Imm.getZExtValue()) ||
         isShiftedMask_32(~Imm.getZExtValue())))
      return TTI::TCC_Free;
    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
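
// getInstructionCost: casts, loads and stores are costed by their dedicated
// hooks; other vector-typed users are scaled by the type-legalization split
// factor.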
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getInstructionCost(U, Operands, CostKind);
  if (U->getType()->isVectorTy()) {
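
// isHardwareLoopProfitable: reject loops that already contain hardware-loop
// intrinsics, and loops whose branch weights say an exit edge is taken more
// often than the back edge.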
    Metrics.analyzeBasicBlock(BB, *this, EphValues);
  for (auto *BB : L->getBlocks())
    for (auto &I : *BB)
      if (auto *Call = dyn_cast<IntrinsicInst>(&I))
        if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations ||
            Call->getIntrinsicID() == Intrinsic::loop_decrement)
          return false;
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !extractBranchWeights(*BI, TrueWeight, FalseWeight))
        continue;
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }
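
// enableAggressiveInterleaving falls through to LoopHasReductions;
// enableMemCmpExpansion expands memcmp with 8/4/2/1-byte loads; and
// getNumberOfRegisters reports 64 registers for the VSX class, 32 otherwise.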
  return LoopHasReductions;
  Options.LoadSizes = {8, 4, 2, 1};
  return ClassID == VSXRC ? 64 : 32;
477 return "PPC::unknown register class";
478 case GPRRC:
return "PPC::GPRRC";
479 case FPRRC:
return "PPC::FPRRC";
480 case VRRC:
return "PPC::VRRC";
481 case VSXRC:
return "PPC::VSXRC";
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
  if (LT1.first != 1 || !LT1.second.isVector())
    return InstructionCost(1);
  if (LT2.first != 1 || !LT2.second.isVector())
    return InstructionCost(1);
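
// getArithmeticInstrCost, getShuffleCost, getCastInstrCost and
// getCmpSelInstrCost all scale the base cost by the factor computed above.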
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);
  InstructionCost Cost =
      BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
  return Cost * CostFactor;
InstructionCost PPCTTIImpl::getShuffleCost(
    TTI::ShuffleKind Kind, Type *Tp, ArrayRef<int> Mask,
    TTI::TargetCostKind CostKind, int Index, Type *SubTp,
    ArrayRef<const Value *> Args, const Instruction *CxtI) {
  return LT.first * CostFactor;
    return Opcode == Instruction::PHI ? 0 : 1;
    return Cost == 0 ? 0 : 1;
  InstructionCost Cost =
      BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                Op1Info, Op2Info, I);
  return Cost * CostFactor;
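
// getVectorInstrCost: element insert/extract. With P9/P10 direct moves a
// known-index extract can be a single instruction; without VSX the operation
// goes through memory and pays a load-hit-store penalty.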
InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               TTI::TargetCostKind CostKind,
                                               unsigned Index, Value *Op0,
                                               Value *Op1) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0;
  unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1;
  if (ST->hasP9Altivec()) {
    if (ST->hasP10Vector())
      return CostFactor + MaskCostForIdx;
    else if (Index != -1U)
      return 2 * CostFactor;
    if (EltSize == 64 && Index != -1U)
      return 1;
    else if (EltSize == 32) {
      unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
      if (Index == MfvsrwzIndex)
        return 1;
      return CostFactor + MaskCostForIdx;
    }
    return CostFactor + MaskCostForOneBitSize + MaskCostForIdx;
  } else if (ST->hasDirectMove() && Index != -1U) {
    return 3 + MaskCostForOneBitSize;
  // Estimated load-hit-store penalty for Altivec element insert/extract,
  // which must go through a store and reload.
  unsigned LHSPenalty = 2;
    return LHSPenalty + Cost;
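
// getMemoryOpCost: VSX handles unaligned vector accesses directly, pre-P8
// Altivec loads need an extra permute, and remaining unaligned accesses are
// charged once per additional aligned piece they touch.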
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");
  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  unsigned SrcBytes = LT.second.getStoreSize();
  if (ST->hasVSX() && IsAltivecType) {
    if (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32))
      return 1;
    Align AlignBytes = Alignment ? *Alignment : Align(1);
    if (Opcode == Instruction::Load && MemBits == 32 && AlignBytes < SrcBytes)
      return 2;
  }

  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first;

  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, CostKind, i,
                                 nullptr, nullptr);
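
// getInterleavedMemoryOpCost: the underlying wide loads/stores plus
// Factor*(LT.first-1) permutes, each of which Altivec/VSX can do in a single
// instruction.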
InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");
  Cost += Factor * (LT.first - 1);
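
// Relative lookup tables are only enabled for the ELFv2 ABI (this guard most
// likely belongs to shouldBuildRelLookupTables).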
  if (!TM.isELFv2ABI())
    return false;
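
// getTgtMemIntrinsic describes which PPC load/store intrinsics read or write
// memory and through which pointer operand, so alias analysis and scheduling
// can reason about them.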
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  case Intrinsic::ppc_stbcx:
  case Intrinsic::ppc_sthcx:
  case Intrinsic::ppc_stdcx:
  case Intrinsic::ppc_stwcx: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
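
// hasActiveVectorLength: load/store-with-length needs P9/P10 vector support in
// 64-bit mode, and only 128-bit fixed vectors or 8/16/32/64-bit scalars
// qualify.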
bool PPCTTIImpl::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                       Align Alignment) const {
  // Only loads and stores can carry an active vector length on Power.
  if (Opcode != Instruction::Load && Opcode != Instruction::Store)
    return false;
  if ((!ST->hasP9Vector() && !ST->hasP10Vector()) || !ST->isPPC64())
    return false;
  if (isa<FixedVectorType>(DataType)) {
    unsigned VecWidth = DataType->getPrimitiveSizeInBits();
    return VecWidth == 128;
  return IntWidth == 8 || IntWidth == 16 || IntWidth == 32 || IntWidth == 64;
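
// getVPMemoryOpCost: vp.load/vp.store are costed as load/store-with-length.
// The return below blends an estimated P9 pipeline-flush penalty with the
// aligned-path cost, weighted by how close the known alignment is to the
// desired 16 bytes.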
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");
  auto *SrcVTy = dyn_cast<FixedVectorType>(Src);
  assert(SrcVTy && "Expected a vector type for VP memory operations");
  const Align DesiredAlignment(16);
  float AlignmentProb = ((float)Alignment.value()) / DesiredAlignment.value();
  float MisalignmentProb = 1.0 - AlignmentProb;
  return (MisalignmentProb * P9PipelineFlushEstimate) +