#include "llvm/IR/IntrinsicsPowerPC.h"

#define DEBUG_TYPE "ppctti"

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden,
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden,
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

std::optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
 
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.

  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.

  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.

  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");
 
      bool AllEltsOk = true;
      for (unsigned I = 0; I != 16; ++I) {
        Constant *Elt = Mask->getAggregateElement(I);

        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned I = 0; I != 16; ++I) {

          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
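// --- Illustrative sketch (not part of the file): the byte-select semantics
// the vperm constant fold above relies on. Each mask byte selects, via its
// low five bits, one byte from the 32-byte concatenation of the two source
// vectors, which is why a constant mask maps directly onto extract/insert
// (or shufflevector) form. Names below are hypothetical.
#include <array>
#include <cstdint>

std::array<uint8_t, 16> simulateVpermBE(const std::array<uint8_t, 16> &Op0,
                                        const std::array<uint8_t, 16> &Op1,
                                        const std::array<uint8_t, 16> &Mask) {
  std::array<uint8_t, 16> Result{};
  for (unsigned I = 0; I != 16; ++I) {
    unsigned Idx = Mask[I] & 31; // Match the hardware: low 5 bits only.
    Result[I] = Idx < 16 ? Op0[Idx] : Op1[Idx - 16];
  }
  return Result;
}
// On little-endian targets the combine above compensates for the
// instruction's big-endian bias by remapping the index (Idx = 31 - Idx)
// and swapping Op0/Op1, as seen in Op0ToUse/Op1ToUse.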
 
 
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();

  if (Imm.getBitWidth() <= 64) {

      // A constant whose low 16 bits are zero can be materialized with lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;
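// --- Illustrative sketch (simplified from the cost model above): a 64-bit
// immediate is a single instruction on PPC when it fits a signed 16-bit
// field (li), or when it is a 32-bit value whose low 16 bits are zero
// (lis) -- the case the (Imm & 0xFFFF) == 0 test captures.
#include <cstdint>

bool isSingleInstructionImm(int64_t Imm) {
  if (Imm == static_cast<int16_t>(Imm))
    return true; // li RT, Imm
  if (Imm == static_cast<int32_t>(Imm) && (Imm & 0xFFFF) == 0)
    return true; // lis RT, Imm >> 16
  return false;
}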
 
 
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();

  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
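// --- Illustrative sketch: the isInt<N> gate used above asks whether a value
// is representable as a signed N-bit integer (this simple form is valid for
// N < 64; LLVM handles N >= 64 specially). A minimal standalone equivalent:
#include <cstdint>

template <unsigned N> constexpr bool fitsSignedBits(int64_t X) {
  static_assert(N > 0 && N < 64, "N >= 64 needs a special case");
  return X >= -(INT64_C(1) << (N - 1)) && X < (INT64_C(1) << (N - 1));
}
static_assert(fitsSignedBits<16>(32767) && !fitsSignedBits<16>(32768),
              "16-bit boundary");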
 
 
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    [[fallthrough]];
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    [[fallthrough]];
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    [[fallthrough]];
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }
 
 
  return Ty->isVectorTy() && (Ty->getScalarSizeInBits() == 1) &&
         (Ty->getPrimitiveSizeInBits() > 128);
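// --- Note on the predicate above: the i1 vectors wider than 128 bits on
// PPC are the MMA accumulator and pair types such as <256 x i1> and
// <512 x i1>. A plain-integer restatement for illustration:
constexpr bool looksLikeMMAType(unsigned ScalarBits, unsigned NumElts) {
  return ScalarBits == 1 && ScalarBits * NumElts > 128;
}
static_assert(looksLikeMMAType(1, 512) && !looksLikeMMAType(8, 16),
              "v512i1 qualifies, v16i8 does not");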
 
 
  if (U->getType()->isVectorTy()) {
 
 
      TM.getTargetTransformInfo(*L->getHeader()->getParent());

      Metrics.analyzeBasicBlock(BB, TTI, EphValues);

  for (auto *BB : L->getBlocks())

        if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations ||
            Call->getIntrinsicID() == Intrinsic::loop_decrement)

  L->getExitingBlocks(ExitingBlocks);

  for (auto &BB : ExitingBlocks) {

      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !extractBranchWeights(*BI, TrueWeight, FalseWeight))
        continue;

      // If profile data says the exit edge is hotter than the edge staying
      // in the loop, a CTR loop is unlikely to pay off.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
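// --- Illustrative restatement (hypothetical helper) of the profile check
// above: reject a hardware (CTR) loop when the branch weights of some
// exiting block favor the exit edge over the edge staying in the loop.
#include <cstdint>

bool exitIsHotterThanLoop(bool TrueIsExit, uint64_t TrueWeight,
                          uint64_t FalseWeight) {
  uint64_t ExitWeight = TrueIsExit ? TrueWeight : FalseWeight;
  uint64_t LoopWeight = TrueIsExit ? FalseWeight : TrueWeight;
  return ExitWeight > LoopWeight;
}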
 
 
  return LoopHasReductions;
 
 
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
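// --- Illustrative sketch (hypothetical helper, not the LLVM expansion pass
// itself): with LoadSizes = {8, 4, 2, 1}, a memcmp of a known small length
// can be decomposed greedily into the largest available loads,
// e.g. 15 -> 8 + 4 + 2 + 1.
#include <cstddef>
#include <vector>

std::vector<size_t> planMemcmpLoads(size_t Len) {
  std::vector<size_t> Plan;
  for (size_t Sz : {8, 4, 2, 1})
    while (Len >= Sz) {
      Plan.push_back(Sz);
      Len -= Sz;
    }
  return Plan;
}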
 
 
    return ClassID == VSXRC ? 64 : 32;
 
 
      (Ty->getScalarType()->isFloatTy() || Ty->getScalarType()->isDoubleTy()))

  if (Ty && (Ty->getScalarType()->isFP128Ty() ||
             Ty->getScalarType()->isPPC_FP128Ty()))

  if (Ty && Ty->getScalarType()->isHalfTy())
 
 
      return "PPC::unknown register class";
    case GPRRC:       return "PPC::GPRRC";
    case FPRRC:       return "PPC::FPRRC";
    case VRRC:        return "PPC::VRRC";
    case VSXRC:       return "PPC::VSXRC";
 
 
  unsigned Directive = ST->getCPUDirective();

  unsigned Directive = ST->getCPUDirective();
 
 
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())

  if (LT1.first != 1 || !LT1.second.isVector())

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))

    if (LT2.first != 1 || !LT2.second.isVector())
 
 
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info);
  return Cost * CostFactor;
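// --- Note on the multiply above: InstructionCost carries a validity state,
// and invalidity is sticky across arithmetic, so an invalid factor from
// vectorCostAdjustmentFactor cannot silently turn into a plausible-looking
// number. A sketch with std::optional standing in for InstructionCost
// (illustrative only, not LLVM's class):
#include <optional>

using SketchCost = std::optional<int>; // nullopt plays the "invalid" role

SketchCost mulCost(SketchCost A, SketchCost B) {
  if (!A || !B)
    return std::nullopt; // invalidity propagates through the product
  return *A * *B;
}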
 
 
  return LT.first * CostFactor;
 
 
    return Opcode == Instruction::PHI ? 0 : 1;
 
 
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

    return Cost == 0 ? 0 : 1;
 
 
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     Op1Info, Op2Info, I);

  return Cost * CostFactor;
 
 
InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               TTI::TargetCostKind CostKind,
                                               unsigned Index, const Value *Op0,
                                               const Value *Op1) const {
 
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

        Index == (ST->isLittleEndian() ? 1 : 0))

    unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0;

    unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1;
    if (ST->hasP9Altivec()) {

        if (ST->hasP10Vector())
          return CostFactor + MaskCostForIdx;

          return 2 * CostFactor;

        if (EltSize == 64 && Index != -1U)

          unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
          if (Index == MfvsrwzIndex)

          return CostFactor + MaskCostForIdx;

        return CostFactor + MaskCostForOneBitSize + MaskCostForIdx;

    } else if (ST->hasDirectMove() && Index != -1U) {

      return 3 + MaskCostForOneBitSize;

  unsigned LHSPenalty = 2;

    return LHSPenalty + Cost;
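// --- Loose restatement (illustrative numbers, hypothetical helper) of the
// element-extract tiers above: P10 extracts any element in one instruction;
// P9 handles a known index cheaply and otherwise pays an extra shuffle;
// older cores with direct moves pay about three operations to cross from
// the vector side to the GPR side.
unsigned extractCostTier(bool HasP10, bool HasP9, bool HasDirectMove,
                         bool KnownIndex) {
  if (HasP10)
    return 1; // single variable-index extract instruction
  if (HasP9)
    return KnownIndex ? 1 : 2; // unknown index pays an extra shuffle
  if (HasDirectMove && KnownIndex)
    return 3; // move across the vector/GPR boundary
  return 3;   // illustrative fallback through memory
}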
 
 
  if (TLI->getValueType(DL, Src, true) == MVT::Other)

  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);

  unsigned MemBits = Src->getPrimitiveSizeInBits();
  unsigned SrcBytes = LT.second.getStoreSize();
  if (ST->hasVSX() && IsAltivecType) {
    if (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32))

    if (Opcode == Instruction::Load && MemBits == 32 && Alignment < SrcBytes)

  if (!SrcBytes || Alignment >= SrcBytes)

  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  if (IsVSXType || (ST->hasVSX() && IsAltivecType))

  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))

  Cost += LT.first * ((SrcBytes / Alignment.value()) - 1);

  if (Src->isVectorTy() && Opcode == Instruction::Store)
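// --- Worked instance of the misalignment formula above (assumed values): a
// 16-byte vector access with a known 4-byte alignment that legalizes to one
// operation (LT.first = 1) costs the base access plus
// 1 * ((16 / 4) - 1) = 3 extra operations, one per additional aligned piece.
static_assert(1 * ((16 / 4) - 1) == 3, "three pieces beyond the first");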
 
 
    bool UseMaskForCond, bool UseMaskForGaps) const {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // One permutation per incoming register beyond the first, per result.
  Cost += Factor * (LT.first - 1);
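// --- Worked instance (assumed values): an interleave factor of 4 on a type
// that legalizes into LT.first = 2 registers adds 4 * (2 - 1) = 4 to the
// cost, covering the permutations needed for the second register.
static_assert(4 * (2 - 1) == 4, "permutation cost for the second register");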
 
 
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee only if its subtarget features exactly match the caller's.
  return CallerBits == CalleeBits;
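// --- Minimal sketch of the test above, with std::bitset standing in for
// llvm::FeatureBitset: PPC permits inlining only when caller and callee
// carry exactly the same subtarget feature bits.
#include <bitset>

bool featureSetsMatch(const std::bitset<256> &CallerBits,
                      const std::bitset<256> &CalleeBits) {
  return CallerBits == CalleeBits;
}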
 
 
      return Ty->isIntOrIntVectorTy(1) && Ty->getPrimitiveSizeInBits() > 128;
 
 
  if (!TM.isELFv2ABI())
 
 
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  case Intrinsic::ppc_stbcx:
  case Intrinsic::ppc_sthcx:
  case Intrinsic::ppc_stdcx:
  case Intrinsic::ppc_stwcx: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
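// --- Illustrative mirror (hypothetical struct) of the flags filled in
// above: the vector load intrinsics are marked read-only, while both the
// vector store and the store-conditional (st*cx.) intrinsics are marked
// write-only.
struct MemIntrinsicFlagsSketch {
  bool ReadMem;
  bool WriteMem;
};
constexpr MemIntrinsicFlagsSketch VectorLoad{/*ReadMem=*/true,
                                             /*WriteMem=*/false};
constexpr MemIntrinsicFlagsSketch VectorStore{/*ReadMem=*/false,
                                              /*WriteMem=*/true};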
 
 
  return TLI->supportsTailCallFor(CB);
 
 