LLVM 20.0.0git
VectorUtils.h File Reference
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/VFABIDemangler.h"
#include "llvm/IR/VectorTypeUtils.h"
#include "llvm/Support/CheckedArithmetic.h"


Classes

class  llvm::VFDatabase
 The Vector Function Database.
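
A minimal sketch of querying the database for a call, assuming a hypothetical helper named dumpVectorVariants; VFDatabase::getMappings returns one VFInfo per vector variant known for the callee.

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

// Print every vector variant the VFDatabase knows for this call.
static void dumpVectorVariants(const llvm::CallInst &CI) {
  for (const llvm::VFInfo &Info : llvm::VFDatabase::getMappings(CI))
    llvm::errs() << Info.ScalarName << " -> " << Info.VectorName << "\n";
}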
 
class  llvm::InterleaveGroup< InstTy >
 The group of interleaved loads/stores sharing the same stride and close to each other.
 
class  llvm::InterleavedAccessInfo
 Drive the analysis of interleaved memory accesses in the loop.
 

Namespaces

namespace  llvm
 The top-level namespace containing all of the LLVM APIs.
 
namespace  llvm::Intrinsic
 This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
 

Functions

bool llvm::isTriviallyVectorizable (Intrinsic::ID ID)
 Identify if the intrinsic is trivially vectorizable.
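
A minimal usage sketch; llvm.fma is among the intrinsics in the trivially vectorizable set.

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"

// llvm.fma.* can be widened lane-wise to a vector llvm.fma.* call.
static bool canWidenFMA() {
  return llvm::isTriviallyVectorizable(llvm::Intrinsic::fma);
}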
 
bool llvm::isTriviallyScalarizable (Intrinsic::ID ID, const TargetTransformInfo *TTI)
 Identify if the intrinsic is trivially scalarizable.
 
bool llvm::isVectorIntrinsicWithScalarOpAtArg (Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
 Identifies if the vector form of the intrinsic has a scalar operand at index ScalarOpdIdx.
 
bool llvm::isVectorIntrinsicWithOverloadTypeAtArg (Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
 Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx, or on the return type if OpdIdx is -1.
 
bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField (Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI)
 Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct element index RetIdx.
 
Intrinsic::ID llvm::getVectorIntrinsicIDForCall (const CallInst *CI, const TargetLibraryInfo *TLI)
 Returns the intrinsic ID for the call, or Intrinsic::not_intrinsic if the call cannot be associated with one.
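
A sketch of the typical query pattern; the helper name isVectorizableIntrinsicCall is hypothetical.

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Instructions.h"

// Resolve a call (direct intrinsic or recognized library call) to an
// intrinsic ID and check that it has a per-lane vector form.
static bool isVectorizableIntrinsicCall(const llvm::CallInst *CI,
                                        const llvm::TargetLibraryInfo *TLI) {
  llvm::Intrinsic::ID ID = llvm::getVectorIntrinsicIDForCall(CI, TLI);
  return ID != llvm::Intrinsic::not_intrinsic &&
         llvm::isTriviallyVectorizable(ID);
}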
 
Value * llvm::findScalarElement (Value *V, unsigned EltNo)
 Given a vector and an element number, see if the scalar value is already around as a register, for example if it were inserted then extracted from the vector.
 
int llvm::getSplatIndex (ArrayRef< int > Mask)
 If all non-negative Mask elements are the same value, return that value.
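
A small sketch on constant masks; the expected results follow from the description above (undef lanes are encoded as -1 and ignored).

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/VectorUtils.h"

static void splatIndexExamples() {
  int Splat = llvm::getSplatIndex({3, 3, -1, 3});   // 3: all defined lanes pick element 3
  int NoSplat = llvm::getSplatIndex({0, 1, 2, 3});  // -1: lanes pick different elements
  (void)Splat;
  (void)NoSplat;
}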
 
Value * llvm::getSplatValue (const Value *V)
 Get the splat value if the input is a splat vector, otherwise return nullptr.
 
bool llvm::isSplatValue (const Value *V, int Index=-1, unsigned Depth=0)
 Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
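
A sketch of a hypothetical helper combining the two splat queries; isSplatValue can also recognize splats (for example, shuffles with an all-zero mask) for which getSplatValue still returns nullptr.

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Value.h"

// Report whether V is a splat and, if so, try to recover the splatted scalar.
static const llvm::Value *describeSplat(const llvm::Value *V, bool &IsSplat) {
  IsSplat = llvm::isSplatValue(V);
  return IsSplat ? llvm::getSplatValue(V) : nullptr; // may still be nullptr
}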
 
bool llvm::getShuffleDemandedElts (int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
 Transform a shuffle mask's output demanded element mask into demanded element masks for the two operands; returns false if the mask isn't valid.
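
A sketch with a two-source mask; the expected demanded-element splits are given in comments.

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/VectorUtils.h"

static void demandedEltsExample() {
  // Mask over two 4-element sources: lanes 0/2 read the LHS, lanes 1/3 the RHS.
  const int Mask[] = {0, 5, 2, 7};
  llvm::APInt DemandedElts = llvm::APInt::getAllOnes(4);
  llvm::APInt DemandedLHS, DemandedRHS;
  bool Valid = llvm::getShuffleDemandedElts(/*SrcWidth=*/4, Mask, DemandedElts,
                                            DemandedLHS, DemandedRHS);
  (void)Valid; // true here; DemandedLHS == 0b0101, DemandedRHS == 0b1010
}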
 
void llvm::narrowShuffleMaskElts (int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
 Replace each shuffle mask index with the scaled sequential indices for an equivalent mask of narrowed elements.
 
bool llvm::widenShuffleMaskElts (int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
 Try to transform a shuffle mask by replacing elements with the scaled index for an equivalent mask of widened elements.
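
A sketch showing that narrowing and widening are inverse rescalings of a shuffle mask; the expected results are in comments.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"

static void rescaleMaskExample() {
  llvm::SmallVector<int, 8> Narrow, Wide;

  // Each index expands to Scale consecutive narrow indices:
  // <1, -1> with Scale = 2 becomes <2, 3, -1, -1>.
  llvm::narrowShuffleMaskElts(/*Scale=*/2, {1, -1}, Narrow);

  // Widening succeeds only when indices pair up into whole wide elements:
  // <0, 1, 6, 7> with Scale = 2 becomes <0, 3>.
  bool Widened = llvm::widenShuffleMaskElts(/*Scale=*/2, {0, 1, 6, 7}, Wide);
  (void)Widened; // true here; false e.g. for <0, 2, 4, 6>
}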
 
bool llvm::scaleShuffleMaskElts (unsigned NumDstElts, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
 Attempt to narrow/widen the Mask shuffle mask to the NumDstElts target width.
 
void llvm::getShuffleMaskWithWidestElts (ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
 Repeatedly apply widenShuffleMaskElts() for as long as it succeeds, to get the shuffle mask with the widest possible elements.
 
void llvm::processShuffleMasks (ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
 Splits and processes a shuffle mask depending on the number of input and output registers.
 
void llvm::getHorizDemandedEltsForFirstOperand (unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
 Compute the demanded elements mask of horizontal binary operations.
 
MapVector< Instruction *, uint64_t > llvm::computeMinimumValueSizes (ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
 Compute a map of integer instructions to their minimum legal type size.
 
MDNode * llvm::uniteAccessGroups (MDNode *AccGroups1, MDNode *AccGroups2)
 Compute the union of two access-group lists.
 
MDNode * llvm::intersectAccessGroups (const Instruction *Inst1, const Instruction *Inst2)
 Compute the access-group list of access groups that Inst1 and Inst2 are both in.
 
Instruction * llvm::propagateMetadata (Instruction *I, ArrayRef< Value * > VL)
 Propagate metadata from the values in VL to the instruction I: for each kind in Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath, MD_nontemporal, MD_access_group, MD_mmra], combine that metadata across the elements of VL and set the result on I.
 
Constant * llvm::createBitMaskForGaps (IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
 Create a mask that filters the members of an interleave group where there are gaps.
 
llvm::SmallVector< int, 16 > llvm::createReplicatedMask (unsigned ReplicationFactor, unsigned VF)
 Create a mask with replicated elements.
 
llvm::SmallVector< int, 16 > llvm::createInterleaveMask (unsigned VF, unsigned NumVecs)
 Create an interleave shuffle mask.
 
llvm::SmallVector< int, 16 > llvm::createStrideMask (unsigned Start, unsigned Stride, unsigned VF)
 Create a stride shuffle mask.
 
llvm::SmallVector< int, 16 > llvm::createSequentialMask (unsigned Start, unsigned NumInts, unsigned NumUndefs)
 Create a sequential shuffle mask.
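
A sketch of the four mask-creation helpers above; the masks they produce, per their documented forms, are shown in comments.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"

static void maskCreationExamples() {
  // <0, 0, 0, 1, 1, 1>: each of 2 lanes replicated 3 times.
  llvm::SmallVector<int, 16> Rep = llvm::createReplicatedMask(3, 2);
  // <0, 4, 1, 5, 2, 6, 3, 7>: interleave 2 vectors of VF = 4.
  llvm::SmallVector<int, 16> Ilv = llvm::createInterleaveMask(4, 2);
  // <0, 2, 4, 6>: every 2nd element starting at 0, VF = 4.
  llvm::SmallVector<int, 16> Str = llvm::createStrideMask(0, 2, 4);
  // <0, 1, 2, 3, -1, -1>: 4 sequential indices padded with 2 undefs.
  llvm::SmallVector<int, 16> Seq = llvm::createSequentialMask(0, 4, 2);
  (void)Rep; (void)Ilv; (void)Str; (void)Seq;
}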
 
llvm::SmallVector< int, 16 > llvm::createUnaryMask (ArrayRef< int > Mask, unsigned NumElts)
 Given a shuffle mask for a binary shuffle, create the equivalent shuffle mask assuming both operands are identical.
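
A small sketch: when both shuffle operands are the same vector, indices referring to the second operand wrap back into the first, so (assuming that behavior) <0, 5, 2, 7> with NumElts = 4 becomes <0, 1, 2, 3>.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"

static llvm::SmallVector<int, 16> unaryMaskExample() {
  // Binary mask <0, 5, 2, 7> over two 4-element operands -> unary <0, 1, 2, 3>.
  return llvm::createUnaryMask({0, 5, 2, 7}, /*NumElts=*/4);
}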
 
Value * llvm::concatenateVectors (IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
 Concatenate a list of vectors.
 
bool llvm::maskIsAllZeroOrUndef (Value *Mask)
 Given a mask vector of i1, return true if all elements of the predicate mask are known to be false or undef.
 
bool llvm::maskIsAllOneOrUndef (Value *Mask)
 Given a mask vector of i1, return true if all elements of the predicate mask are known to be true or undef.
 
bool llvm::maskContainsAllOneOrUndef (Value *Mask)
 Given a mask vector of i1, return true if any element of the predicate mask is known to be true or undef.
 
APInt llvm::possiblyDemandedEltsInMask (Value *Mask)
 Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) with a bit set for each lane that may be active.
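
A sketch exercising these predicate-mask helpers on a constant <4 x i1> mask with one undef lane, built in a throwaway LLVMContext; the function name predicateMaskExample is hypothetical.

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"

static void predicateMaskExample() {
  llvm::LLVMContext Ctx;
  llvm::Type *I1 = llvm::Type::getInt1Ty(Ctx);

  // <i1 true, i1 true, i1 undef, i1 true>
  llvm::Constant *Lanes[] = {
      llvm::ConstantInt::getTrue(Ctx), llvm::ConstantInt::getTrue(Ctx),
      llvm::UndefValue::get(I1), llvm::ConstantInt::getTrue(Ctx)};
  llvm::Constant *Mask = llvm::ConstantVector::get(Lanes);

  bool AllOne = llvm::maskIsAllOneOrUndef(Mask);    // true: every lane is 1 or undef
  bool AllZero = llvm::maskIsAllZeroOrUndef(Mask);  // false: some lanes are known 1
  llvm::APInt Active = llvm::possiblyDemandedEltsInMask(Mask); // lanes not known 0
  (void)AllOne; (void)AllZero; (void)Active;
}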