#define DEBUG_TYPE "scalarize-masked-mem-intrin"

class ScalarizeMaskedMemIntrinLegacyPass : public FunctionPass {
  StringRef getPassName() const override {
    return "Scalarize Masked Memory Intrinsics";
  }
};

char ScalarizeMaskedMemIntrinLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ScalarizeMaskedMemIntrinLegacyPass, DEBUG_TYPE,
                      "Scalarize unsupported masked memory intrinsics", false,
                      false)
INITIALIZE_PASS_END(ScalarizeMaskedMemIntrinLegacyPass, DEBUG_TYPE,
                    "Scalarize unsupported masked memory intrinsics", false,
                    false)

FunctionPass *llvm::createScalarizeMaskedMemIntrinLegacyPass() {
  return new ScalarizeMaskedMemIntrinLegacyPass();
}
// isConstantIntVector: every element of the mask must be a ConstantInt.
for (unsigned i = 0; i != NumElts; ++i) {
  Constant *CElt = C->getAggregateElement(i);
  if (!CElt || !isa<ConstantInt>(CElt))
    return false;
}

// adjustForEndian: big-endian layouts number mask bits from the top.
return DL.isBigEndian() ? VectorWidth - 1 - Idx : Idx;
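A quick illustration of adjustForEndian; the width and indices are made-up
values, not taken from the source:

// Illustrative only: with VectorWidth = 4, lane 0 of the mask maps to bit 3
// of the bitcast scalar mask on a big-endian target, and to bit 0 on a
// little-endian one:
//   adjustForEndian(BigEndianDL, /*VectorWidth=*/4, /*Idx=*/0) == 3
//   adjustForEndian(LittleEndianDL, /*VectorWidth=*/4, /*Idx=*/0) == 0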
// Translate a masked load intrinsic, e.g.
//   <16 x i32> @llvm.masked.load(ptr %addr, i32 align, <16 x i1> %mask,
//                                <16 x i32> %passthru)
// into a chain of basic blocks that load elements one-by-one where the
// corresponding mask bit is set.
static void scalarizeMaskedLoad(const DataLayout &DL, bool HasBranchDivergence,
                                CallInst *CI, DomTreeUpdater *DTU,
                                bool &ModifiedDT) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);
  // ... (AlignVal and VecType taken from the call's operands and type) ...
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut: an all-true mask is just an ordinary vector load.
  if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
    LoadInst *NewI = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal);
    // ...
  }

  // Adjust alignment for the scalar loads.
  const Align AdjustedAlignVal =
      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();

  // The result vector.
  Value *VResult = Src0;

  // A constant mask needs no control flow: load just the enabled lanes.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
      VResult = Builder.CreateInsertElement(VResult, Load, Idx);
    }
    // ...
  }

  // A splat mask guards one full-width vector load behind a single branch.
  if (isSplatValue(Mask, /*Index=*/0)) {
    Value *Predicate = Builder.CreateExtractElement(Mask, uint64_t(0ull),
                                                    Mask->getName() + ".first");
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.load");
    LoadInst *Load = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal,
                                               CI->getName() + ".cond.load");
    Load->copyMetadata(*CI);
    // ...
    Builder.SetInsertPoint(PostLoad, PostLoad->begin());
    PHINode *Phi = Builder.CreatePHI(VecType, 2);
    Phi->addIncoming(Load, CondBlock);
    Phi->addIncoming(Src0, IfBlock);
    // ...
  }

  // Otherwise test one mask bit per lane. Bitcast the mask to an integer and
  // use scalar bit tests, unless the target has branch divergence (e.g. GPUs,
  // where each i1 would occupy a vector register).
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx);
    }
    // ... (split the block on Predicate, then fill "cond.load") ...
    CondBlock->setName("cond.load");
    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
    // ...
    IfBlock = NewIfBlock;
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
    PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    Phi->addIncoming(NewVResult, CondBlock);
    Phi->addIncoming(VResult, PrevIfBlock);
    VResult = Phi;
  }
  // ...
}
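A rough sketch of the branch-based expansion for lane 0 of a <2 x i32> masked
load on a non-divergent target; the IR below is illustrative, with invented
value names, not output copied from the pass:

//   %scalar_mask = bitcast <2 x i1> %mask to i2
//   %bit0 = and i2 %scalar_mask, 1
//   %cond0 = icmp ne i2 %bit0, 0
//   br i1 %cond0, label %cond.load, label %else
// cond.load:
//   %elt0 = load i32, ptr %ptr, align 4
//   %res0 = insertelement <2 x i32> %passthru, i32 %elt0, i64 0
//   br label %else
// else:
//   %res.phi.else = phi <2 x i32> [ %res0, %cond.load ], [ %passthru, %entry ]
//   ; ...lane 1 repeats the pattern with bit 1...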
// Translate a masked store intrinsic, e.g.
//   void @llvm.masked.store(<16 x i32> %src, ptr %addr, i32 align,
//                           <16 x i1> %mask)
// into a chain of basic blocks that store elements one-by-one where the
// corresponding mask bit is set.
static void scalarizeMaskedStore(const DataLayout &DL,
                                 bool HasBranchDivergence, CallInst *CI,
                                 DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(3);
  // ... (AlignVal and VecType taken from the call's operands) ...
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut: an all-true mask is just an ordinary vector store.
  if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
    StoreInst *Store = Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    Store->copyMetadata(*CI);
    // ...
  }

  // Adjust alignment for the scalar stores.
  const Align AdjustedAlignVal =
      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();

  // A constant mask needs no control flow: store just the enabled lanes.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Idx);
      Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
      Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
    }
    // ...
  }

  // A splat mask guards one full-width vector store behind a single branch.
  if (isSplatValue(Mask, /*Index=*/0)) {
    Value *Predicate = Builder.CreateExtractElement(Mask, uint64_t(0ull),
                                                    Mask->getName() + ".first");
    // ... (same SplitBlockAndInsertIfThen split as in the load) ...
    CondBlock->setName("cond.store");
    StoreInst *Store = Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    Store->copyMetadata(*CI);
    // ...
  }

  // Otherwise test one mask bit per lane, as in scalarizeMaskedLoad.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx);
    }
    // ...
    CondBlock->setName("cond.store");
    Value *OneElt = Builder.CreateExtractElement(Src, Idx);
    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
    Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
    // ...
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
  }
  // ...
}
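Worth noting (my gloss, not a comment from the source): because a store
produces no value, this expansion needs no res.phi chain; each cond.store
block simply falls through to the next bit test. Illustrative shape of one
lane:

//   %bit0 = and i16 %scalar_mask, 1
//   %cond0 = icmp ne i16 %bit0, 0
//   br i1 %cond0, label %cond.store, label %else
// cond.store:
//   %elt0 = extractelement <16 x i32> %src, i64 0
//   store i32 %elt0, ptr %addr, align 4
//   br label %else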
// Translate a masked gather intrinsic, e.g.
//   <16 x i32> @llvm.masked.gather(<16 x ptr> %Ptrs, i32 align,
//                                  <16 x i1> %Mask, <16 x i32> %Src0)
// into a chain of basic blocks that load from the lane's own pointer where
// the corresponding mask bit is set.
static void scalarizeMaskedGather(const DataLayout &DL,
                                  bool HasBranchDivergence, CallInst *CI,
                                  DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Ptrs = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);
  // ... (AlignVal and VecType taken from the call's operands and type) ...
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // The result vector.
  Value *VResult = Src0;
  unsigned VectorWidth = VecType->getNumElements();

  // A constant mask needs no control flow: gather just the enabled lanes.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
      LoadInst *Load =
          Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
      VResult =
          Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
    }
    // ...
  }

  // Otherwise test one mask bit per lane, with the same scalar-bit-test
  // versus extractelement choice as in scalarizeMaskedLoad.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }
    // ...
    CondBlock->setName("cond.load");
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
    LoadInst *Load =
        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
    Value *NewVResult =
        Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
    // ...
    IfBlock = NewIfBlock;
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
    PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    Phi->addIncoming(NewVResult, CondBlock);
    Phi->addIncoming(VResult, PrevIfBlock);
    VResult = Phi;
  }
  // ...
}
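The gather expansion differs from the plain masked load only in where each
address comes from: every active lane first materializes its own scalar
pointer. One lane, in illustrative IR with invented names:

// cond.load:
//   %Ptr0 = extractelement <4 x ptr> %Ptrs, i64 0
//   %Load0 = load i32, ptr %Ptr0, align 4
//   %Res0 = insertelement <4 x i32> %Src0, i32 %Load0, i64 0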
// Translate a masked scatter intrinsic, e.g.
//   void @llvm.masked.scatter(<16 x i32> %Src, <16 x ptr> %Ptrs, i32 align,
//                             <16 x i1> %Mask)
// into a chain of basic blocks that store to the lane's own pointer where
// the corresponding mask bit is set.
static void scalarizeMaskedScatter(const DataLayout &DL,
                                   bool HasBranchDivergence, CallInst *CI,
                                   DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptrs = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(3);
  // ...
  assert(isa<VectorType>(Ptrs->getType()) &&
         "Vector of pointers is expected in masked scatter intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  unsigned VectorWidth = SrcFVTy->getNumElements();

  // A constant mask needs no control flow: scatter just the enabled lanes.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt =
          Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
      Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
    }
    // ...
  }

  // Otherwise test one mask bit per lane.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }
    // ...
    CondBlock->setName("cond.store");
    Value *OneElt = Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
    Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
    // ...
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
  }
  // ...
}
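As with the masked store, the scatter expansion carries no result PHIs. One
active lane looks roughly like this (illustrative IR only):

// cond.store:
//   %Elt0 = extractelement <4 x i32> %Src, i64 0
//   %Ptr0 = extractelement <4 x ptr> %Ptrs, i64 0
//   store i32 %Elt0, ptr %Ptr0, align 4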
// Translate an expanding load, e.g.
//   <4 x i32> @llvm.masked.expandload(ptr %Ptr, <4 x i1> %Mask,
//                                     <4 x i32> %PassThru)
// which reads consecutive memory only for enabled lanes: the pointer advances
// one element per executed load, not per lane.
static void scalarizeMaskedExpandLoad(const DataLayout &DL,
                                      bool HasBranchDivergence, CallInst *CI,
                                      DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(1);
  Value *PassThru = CI->getArgOperand(2);
  // ... (AlignVal and VecType taken from the call) ...
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  unsigned VectorWidth = VecType->getNumElements();

  // The result vector.
  Value *VResult = PassThru;

  // Adjust alignment for the scalar loads.
  const Align AdjustedAlignment =
      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);

  // With a constant mask, build the loaded lanes directly and then blend
  // with the pass-through value via a shuffle.
  if (isConstantIntVector(Mask)) {
    unsigned MemIndex = 0;
    VResult = PoisonValue::get(VecType);
    SmallVector<int, 16> ShuffleMask(VectorWidth, PoisonMaskElem);
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      Value *InsertElt;
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue()) {
        InsertElt = PoisonValue::get(EltTy);
        ShuffleMask[Idx] = Idx + VectorWidth; // take this lane from PassThru
      } else {
        Value *NewPtr =
            Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, AdjustedAlignment,
                                              "Load" + Twine(Idx));
        ShuffleMask[Idx] = Idx;
        ++MemIndex;
      }
      VResult = Builder.CreateInsertElement(VResult, InsertElt, Idx,
                                            "Res" + Twine(Idx));
    }
    VResult = Builder.CreateShuffleVector(VResult, PassThru, ShuffleMask);
    // ...
  }

  // Otherwise test one mask bit per lane.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }
    // ...
    CondBlock->setName("cond.load");
    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, AdjustedAlignment);
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

    // Move the pointer if there are more blocks to come.
    Value *NewPtr;
    if ((Idx + 1) != VectorWidth)
      NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, 1);
    // ...
    IfBlock = NewIfBlock;
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
    PHINode *ResultPhi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    ResultPhi->addIncoming(NewVResult, CondBlock);
    ResultPhi->addIncoming(VResult, PrevIfBlock);
    VResult = ResultPhi;

    // Add a PHI for the pointer if this isn't the last iteration.
    if ((Idx + 1) != VectorWidth) {
      PHINode *PtrPhi = Builder.CreatePHI(Ptr->getType(), 2, "ptr.phi.else");
      PtrPhi->addIncoming(NewPtr, CondBlock);
      PtrPhi->addIncoming(Ptr, PrevIfBlock);
      Ptr = PtrPhi;
    }
  }
  // ...
}
// Translate a compressing store, e.g.
//   void @llvm.masked.compressstore(<4 x i32> %Src, ptr %Ptr, <4 x i1> %Mask)
// which writes enabled lanes to consecutive memory: the pointer advances one
// element per executed store, not per lane.
static void scalarizeMaskedCompressStore(const DataLayout &DL,
                                         bool HasBranchDivergence, CallInst *CI,
                                         DomTreeUpdater *DTU,
                                         bool &ModifiedDT) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  // ... (AlignVal and VecType taken from the call) ...
  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  Type *EltTy = VecType->getElementType();

  // Adjust alignment for the scalar stores.
  const Align AdjustedAlignment =
      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);

  unsigned VectorWidth = VecType->getNumElements();

  // With a constant mask, store the enabled lanes back-to-back.
  if (isConstantIntVector(Mask)) {
    unsigned MemIndex = 0;
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt =
          Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
      Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
      Builder.CreateAlignedStore(OneElt, NewPtr, AdjustedAlignment);
      ++MemIndex;
    }
    // ...
  }

  // Otherwise test one mask bit per lane.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }
    // ...
    CondBlock->setName("cond.store");
    Value *OneElt = Builder.CreateExtractElement(Src, Idx);
    Builder.CreateAlignedStore(OneElt, Ptr, AdjustedAlignment);

    // Move the pointer if there are more blocks to come.
    Value *NewPtr;
    if ((Idx + 1) != VectorWidth)
      NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, 1);
    // ...
    IfBlock = NewIfBlock;
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());

    // Add a PHI for the pointer if this isn't the last iteration.
    if ((Idx + 1) != VectorWidth) {
      PHINode *PtrPhi = Builder.CreatePHI(Ptr->getType(), 2, "ptr.phi.else");
      PtrPhi->addIncoming(NewPtr, CondBlock);
      PtrPhi->addIncoming(Ptr, PrevIfBlock);
      Ptr = PtrPhi;
    }
  }
  // ...
}
// Scalarize an experimental.vector.histogram.* intrinsic: for every enabled
// lane, load the bucket, apply the update operation, and store it back.
static void scalarizeMaskedVectorHistogram(const DataLayout &DL, CallInst *CI,
                                           DomTreeUpdater *DTU,
                                           bool &ModifiedDT) {
  Value *Ptrs = CI->getArgOperand(0);
  Value *Inc = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  // ...
  Type *EltTy = Inc->getType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  unsigned VectorWidth = AddrType->getNumElements();

  // Pick the scalar update matching the histogram variant. (The helper name
  // here is a paraphrase; the switch body is shared by both paths below.)
  auto CreateUpdate = [&](Value *Load) -> Value * {
    Value *UpdateOp;
    switch (cast<IntrinsicInst>(CI)->getIntrinsicID()) {
    case Intrinsic::experimental_vector_histogram_add:
      UpdateOp = Builder.CreateAdd(Load, Inc);
      break;
    case Intrinsic::experimental_vector_histogram_uadd_sat:
      UpdateOp =
          Builder.CreateIntrinsic(Intrinsic::uadd_sat, {EltTy}, {Load, Inc});
      break;
    case Intrinsic::experimental_vector_histogram_umin:
      UpdateOp = Builder.CreateIntrinsic(Intrinsic::umin, {EltTy}, {Load, Inc});
      break;
    case Intrinsic::experimental_vector_histogram_umax:
      UpdateOp = Builder.CreateIntrinsic(Intrinsic::umax, {EltTy}, {Load, Inc});
      break;
    default:
      llvm_unreachable("Unexpected histogram intrinsic");
    }
    return UpdateOp;
  };

  // A constant mask needs no control flow.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
      LoadInst *Load = Builder.CreateLoad(EltTy, Ptr, "Load" + Twine(Idx));
      Value *Update = CreateUpdate(Load);
      Builder.CreateStore(Update, Ptr);
    }
    // ...
  }

  // Otherwise branch on each lane's mask bit.
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate =
        Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    // ...
    CondBlock->setName("cond.histogram.update");
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
    LoadInst *Load = Builder.CreateLoad(EltTy, Ptr, "Load" + Twine(Idx));
    Value *UpdateOp = CreateUpdate(Load);
    Builder.CreateStore(UpdateOp, Ptr);
    // ...
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
  }
  // ...
}
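Per-lane shape of the add variant's update, in illustrative IR with invented
names:

// cond.histogram.update:
//   %Ptr0 = extractelement <4 x ptr> %Ptrs, i64 0
//   %Load0 = load i32, ptr %Ptr0
//   %add = add i32 %Load0, %Inc
//   store i32 %add, ptr %Ptr0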
static bool runImpl(Function &F, const TargetTransformInfo &TTI,
                    DominatorTree *DT) {
  std::optional<DomTreeUpdater> DTU;
  if (DT)
    DTU.emplace(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  bool EverMadeChange = false;
  bool MadeChange = true;
  auto &DL = F.getDataLayout();
  bool HasBranchDivergence = TTI.hasBranchDivergence(&F);
  while (MadeChange) {
    MadeChange = false;
    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration, TTI, DL,
                                  HasBranchDivergence, DTU ? &*DTU : nullptr);

      // Restart BB iteration if the dominator tree of the Function was changed.
      if (ModifiedDTOnIteration)
        break;
    }

    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}
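To observe the pass in isolation it can be run by name through opt; the
new-pass-manager name below matches DEBUG_TYPE and is assumed from the pass
registration, e.g. `opt -passes=scalarize-masked-mem-intrin -S input.ll`.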
bool ScalarizeMaskedMemIntrinLegacyPass::runOnFunction(Function &F) {
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  DominatorTree *DT = nullptr;
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &DTWP->getDomTree();
  return runImpl(F, TTI, DT);
}
static bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT,
                          const TargetTransformInfo &TTI, const DataLayout &DL,
                          bool HasBranchDivergence, DomTreeUpdater *DTU) {
  bool MadeChange = false;
  BasicBlock::iterator CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    // Dispatch each call to optimizeCallInst; bail out if the CFG changed.
    // ...
  }
  return MadeChange;
}

// Scalarize only the intrinsics the target cannot handle natively.
static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
                             const TargetTransformInfo &TTI,
                             const DataLayout &DL, bool HasBranchDivergence,
                             DomTreeUpdater *DTU) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
    // The scalarization code below does not work for scalable vectors.
    if (isa<ScalableVectorType>(II->getType()) ||
        any_of(II->args(),
               [](Value *V) { return isa<ScalableVectorType>(V->getType()); }))
      return false;
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umin:
    case Intrinsic::experimental_vector_histogram_umax:
      // ... (skipped when TTI reports the histogram as legal) ...
      scalarizeMaskedVectorHistogram(DL, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_load:
      if (TTI.isLegalMaskedLoad(
              CI->getType(),
              cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue(),
              cast<PointerType>(CI->getArgOperand(0)->getType())
                  ->getAddressSpace()))
        return false;
      scalarizeMaskedLoad(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_store:
      if (TTI.isLegalMaskedStore(
              CI->getArgOperand(0)->getType(),
              cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue(),
              cast<PointerType>(CI->getArgOperand(1)->getType())
                  ->getAddressSpace()))
        return false;
      scalarizeMaskedStore(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_gather: {
      // ... (LoadTy and Alignment computed from the call) ...
      if (TTI.isLegalMaskedGather(LoadTy, Alignment) &&
          !TTI.forceScalarizeMaskedGather(cast<VectorType>(LoadTy), Alignment))
        return false;
      scalarizeMaskedGather(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    }
    case Intrinsic::masked_scatter: {
      // ... (StoreTy and Alignment computed from the call) ...
      if (TTI.isLegalMaskedScatter(StoreTy, Alignment) &&
          !TTI.forceScalarizeMaskedScatter(cast<VectorType>(StoreTy),
                                           Alignment))
        return false;
      scalarizeMaskedScatter(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    }
    case Intrinsic::masked_expandload:
      if (TTI.isLegalMaskedExpandLoad(
              CI->getType(),
              CI->getAttributes().getParamAttrs(0).getAlignment().valueOrOne()))
        return false;
      scalarizeMaskedExpandLoad(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_compressstore:
      if (TTI.isLegalMaskedCompressStore(
              CI->getArgOperand(0)->getType(),
              CI->getAttributes().getParamAttrs(1).getAlignment().valueOrOne()))
        return false;
      scalarizeMaskedCompressStore(DL, HasBranchDivergence, CI, DTU,
                                   ModifiedDT);
      return true;
    }
  }
  return false;
}
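A minimal input that exercises the gather path, assuming the target reports
the gather as illegal (hypothetical module, shown as a comment so the file
stays compilable):

// define <4 x i32> @g(<4 x ptr> %p, <4 x i1> %m, <4 x i32> %pt) {
//   %r = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(
//            <4 x ptr> %p, i32 4, <4 x i1> %m, <4 x i32> %pt)
//   ret <4 x i32> %r
// }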