24#ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
25#define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
101 const unsigned char SubclassID;
118 VPlan *Plan =
nullptr;
128 assert(Predecessor &&
"Cannot add nullptr predecessor!");
133 void removePredecessor(VPBlockBase *Predecessor) {
134 auto Pos =
find(Predecessors, Predecessor);
135 assert(Pos &&
"Predecessor does not exist");
136 Predecessors.
erase(Pos);
140 void removeSuccessor(VPBlockBase *Successor) {
141 auto Pos =
find(Successors, Successor);
142 assert(Pos &&
"Successor does not exist");
143 Successors.
erase(Pos);
148 void replacePredecessor(VPBlockBase *Old, VPBlockBase *New) {
149 auto I =
find(Predecessors, Old);
151 assert(Old->getParent() ==
New->getParent() &&
152 "replaced predecessor must have the same parent");
158 void replaceSuccessor(VPBlockBase *Old, VPBlockBase *New) {
159 auto I =
find(Successors, Old);
161 assert(Old->getParent() ==
New->getParent() &&
162 "replaced successor must have the same parent");
168 : SubclassID(SC), Name(
N) {}
175 using VPBlockTy =
enum { VPRegionBlockSC, VPBasicBlockSC, VPIRBasicBlockSC };
181 const std::string &
getName()
const {
return Name; }
195 const VPlan *getPlan()
const;
199 void setPlan(
VPlan *ParentPlan);
232 return (Successors.size() == 1 ? *Successors.begin() :
nullptr);
238 return (Predecessors.size() == 1 ? *Predecessors.begin() :
nullptr);
291 assert(Successors.empty() &&
"Setting one successor when others exist.");
293 "connected blocks must have the same parent");
302 assert(Successors.empty() &&
"Setting two successors when others exist.");
303 appendSuccessor(IfTrue);
304 appendSuccessor(IfFalse);
311 assert(Predecessors.empty() &&
"Block predecessors already set.");
312 for (
auto *Pred : NewPreds)
313 appendPredecessor(Pred);
320 assert(Successors.empty() &&
"Block successors already set.");
321 for (
auto *Succ : NewSuccs)
322 appendSuccessor(Succ);
334 assert(Predecessors.size() == 2 &&
"must have 2 predecessors to swap");
335 std::swap(Predecessors[0], Predecessors[1]);
342 assert(Successors.size() == 2 &&
"must have 2 successors to swap");
349 "must have Pred exactly once in Predecessors");
350 return std::distance(Predecessors.begin(),
find(Predecessors, Pred));
356 "must have Succ exactly once in Successors");
357 return std::distance(Successors.begin(),
find(Successors, Succ));
367#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
411 const unsigned char SubclassID;
414 VPBasicBlock *Parent =
nullptr;
438 VPVectorEndPointerSC,
440 VPWidenCanonicalIVSC,
456 VPCurrentIterationPHISC,
457 VPActiveLaneMaskPHISC,
458 VPFirstOrderRecurrencePHISC,
459 VPWidenIntOrFpInductionSC,
460 VPWidenPointerInductionSC,
464 VPFirstPHISC = VPWidenPHISC,
465 VPFirstHeaderPHISC = VPCurrentIterationPHISC,
466 VPLastHeaderPHISC = VPReductionPHISC,
467 VPLastPHISC = VPReductionPHISC,
472 :
VPDef(),
VPUser(Operands), SubclassID(SC), DL(DL) {}
481 const VPBasicBlock *
getParent()
const {
return Parent; }
543 bool mayReadFromMemory()
const;
546 bool mayWriteToMemory()
const;
557 bool isScalarCast()
const;
562#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
578#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
587#define VP_CLASSOF_IMPL(VPRecipeID) \
588 static inline bool classof(const VPRecipeBase *R) { \
589 return R->getVPRecipeID() == VPRecipeID; \
591 static inline bool classof(const VPValue *V) { \
592 auto *R = V->getDefiningRecipe(); \
593 return R && R->getVPRecipeID() == VPRecipeID; \
595 static inline bool classof(const VPUser *U) { \
596 auto *R = dyn_cast<VPRecipeBase>(U); \
597 return R && R->getVPRecipeID() == VPRecipeID; \
599 static inline bool classof(const VPSingleDefRecipe *R) { \
600 return R->getVPRecipeID() == VPRecipeID; \
617 switch (R->getVPRecipeID()) {
618 case VPRecipeBase::VPDerivedIVSC:
619 case VPRecipeBase::VPExpandSCEVSC:
620 case VPRecipeBase::VPExpressionSC:
621 case VPRecipeBase::VPInstructionSC:
622 case VPRecipeBase::VPReductionEVLSC:
623 case VPRecipeBase::VPReductionSC:
624 case VPRecipeBase::VPReplicateSC:
625 case VPRecipeBase::VPScalarIVStepsSC:
626 case VPRecipeBase::VPVectorPointerSC:
627 case VPRecipeBase::VPVectorEndPointerSC:
628 case VPRecipeBase::VPWidenCallSC:
629 case VPRecipeBase::VPWidenCanonicalIVSC:
630 case VPRecipeBase::VPWidenCastSC:
631 case VPRecipeBase::VPWidenGEPSC:
632 case VPRecipeBase::VPWidenIntrinsicSC:
633 case VPRecipeBase::VPWidenSC:
634 case VPRecipeBase::VPBlendSC:
635 case VPRecipeBase::VPPredInstPHISC:
636 case VPRecipeBase::VPCurrentIterationPHISC:
637 case VPRecipeBase::VPActiveLaneMaskPHISC:
638 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
639 case VPRecipeBase::VPWidenPHISC:
640 case VPRecipeBase::VPWidenIntOrFpInductionSC:
641 case VPRecipeBase::VPWidenPointerInductionSC:
642 case VPRecipeBase::VPReductionPHISC:
644 case VPRecipeBase::VPBranchOnMaskSC:
645 case VPRecipeBase::VPInterleaveEVLSC:
646 case VPRecipeBase::VPInterleaveSC:
647 case VPRecipeBase::VPIRInstructionSC:
648 case VPRecipeBase::VPWidenLoadEVLSC:
649 case VPRecipeBase::VPWidenLoadSC:
650 case VPRecipeBase::VPWidenStoreEVLSC:
651 case VPRecipeBase::VPWidenStoreSC:
652 case VPRecipeBase::VPHistogramSC:
661 auto *R = V->getDefiningRecipe();
680#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
689 enum class OperationType :
unsigned char {
729 struct ExactFlagsTy {
731 ExactFlagsTy(
bool Exact) : IsExact(Exact) {}
733 struct FastMathFlagsTy {
734 char AllowReassoc : 1;
737 char NoSignedZeros : 1;
738 char AllowReciprocal : 1;
739 char AllowContract : 1;
747 uint8_t CmpPredStorage;
748 FastMathFlagsTy FMFs;
751 struct ReductionFlagsTy {
754 unsigned char Kind : 6;
756 unsigned char IsOrdered : 1;
757 unsigned char IsInLoop : 1;
758 FastMathFlagsTy FMFs;
760 ReductionFlagsTy(
RecurKind Kind,
bool IsOrdered,
bool IsInLoop,
762 : Kind(static_cast<unsigned char>(Kind)), IsOrdered(IsOrdered),
763 IsInLoop(IsInLoop), FMFs(FMFs) {}
766 OperationType OpType;
787 OpType = OperationType::FCmp;
789 FCmp->getPredicate());
791 FCmpFlags.FMFs = FCmp->getFastMathFlags();
793 OpType = OperationType::Cmp;
798 OpType = OperationType::DisjointOp;
801 OpType = OperationType::OverflowingBinOp;
802 WrapFlags = {
Op->hasNoUnsignedWrap(),
Op->hasNoSignedWrap()};
804 OpType = OperationType::Trunc;
807 OpType = OperationType::PossiblyExactOp;
810 OpType = OperationType::GEPOp;
813 "wrap flags truncated");
815 OpType = OperationType::NonNegOp;
818 OpType = OperationType::FPMathOp;
819 FMFs =
Op->getFastMathFlags();
829 : OpType(OperationType::FCmp),
AllFlags() {
836 : OpType(OperationType::OverflowingBinOp),
AllFlags() {
841 : OpType(OperationType::Trunc),
AllFlags() {
850 : OpType(OperationType::DisjointOp),
AllFlags() {
855 : OpType(OperationType::NonNegOp),
AllFlags() {
860 : OpType(OperationType::PossiblyExactOp),
AllFlags() {
865 : OpType(OperationType::GEPOp),
AllFlags() {
870 : OpType(OperationType::ReductionOp),
AllFlags() {
875 OpType = Other.OpType;
889 case OperationType::OverflowingBinOp:
893 case OperationType::Trunc:
897 case OperationType::DisjointOp:
900 case OperationType::PossiblyExactOp:
903 case OperationType::GEPOp:
906 case OperationType::FPMathOp:
907 case OperationType::FCmp:
908 case OperationType::ReductionOp:
909 getFMFsRef().NoNaNs =
false;
910 getFMFsRef().NoInfs =
false;
912 case OperationType::NonNegOp:
915 case OperationType::Cmp:
916 case OperationType::Other:
924 case OperationType::OverflowingBinOp:
928 case OperationType::Trunc:
932 case OperationType::DisjointOp:
935 case OperationType::PossiblyExactOp:
938 case OperationType::GEPOp:
942 case OperationType::FPMathOp:
943 case OperationType::FCmp: {
944 const FastMathFlagsTy &
F = getFMFsRef();
945 I.setHasAllowReassoc(
F.AllowReassoc);
946 I.setHasNoNaNs(
F.NoNaNs);
947 I.setHasNoInfs(
F.NoInfs);
948 I.setHasNoSignedZeros(
F.NoSignedZeros);
949 I.setHasAllowReciprocal(
F.AllowReciprocal);
950 I.setHasAllowContract(
F.AllowContract);
951 I.setHasApproxFunc(
F.ApproxFunc);
954 case OperationType::NonNegOp:
957 case OperationType::ReductionOp:
959 case OperationType::Cmp:
960 case OperationType::Other:
966 assert((OpType == OperationType::Cmp || OpType == OperationType::FCmp) &&
967 "recipe doesn't have a compare predicate");
974 assert((OpType == OperationType::Cmp || OpType == OperationType::FCmp) &&
975 "recipe doesn't have a compare predicate");
976 if (OpType == OperationType::FCmp)
989 return OpType == OperationType::Cmp || OpType == OperationType::FCmp;
994 return OpType == OperationType::FPMathOp || OpType == OperationType::FCmp ||
995 OpType == OperationType::ReductionOp;
1004 assert(OpType == OperationType::NonNegOp &&
1005 "recipe doesn't have a NNEG flag");
1011 case OperationType::OverflowingBinOp:
1013 case OperationType::Trunc:
1022 case OperationType::OverflowingBinOp:
1024 case OperationType::Trunc:
1033 case OperationType::OverflowingBinOp:
1034 case OperationType::Trunc:
1046 assert(OpType == OperationType::DisjointOp &&
1047 "recipe cannot have a disjoing flag");
1052 assert(OpType == OperationType::ReductionOp &&
1053 "recipe doesn't have reduction flags");
1058 assert(OpType == OperationType::ReductionOp &&
1059 "recipe doesn't have reduction flags");
1064 assert(OpType == OperationType::ReductionOp &&
1065 "recipe doesn't have reduction flags");
1071 FastMathFlagsTy &getFMFsRef() {
1072 if (OpType == OperationType::FCmp)
1074 if (OpType == OperationType::ReductionOp)
1078 const FastMathFlagsTy &getFMFsRef()
const {
1079 if (OpType == OperationType::FCmp)
1081 if (OpType == OperationType::ReductionOp)
1100#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1106static_assert(
sizeof(
VPIRFlags) <= 3,
"VPIRFlags should not grow");
1117 return R->getVPRecipeID() == VPRecipeBase::VPBlendSC ||
1118 R->getVPRecipeID() == VPRecipeBase::VPInstructionSC ||
1119 R->getVPRecipeID() == VPRecipeBase::VPWidenSC ||
1120 R->getVPRecipeID() == VPRecipeBase::VPWidenGEPSC ||
1121 R->getVPRecipeID() == VPRecipeBase::VPWidenCallSC ||
1122 R->getVPRecipeID() == VPRecipeBase::VPWidenCastSC ||
1123 R->getVPRecipeID() == VPRecipeBase::VPWidenIntrinsicSC ||
1124 R->getVPRecipeID() == VPRecipeBase::VPReductionSC ||
1125 R->getVPRecipeID() == VPRecipeBase::VPReductionEVLSC ||
1126 R->getVPRecipeID() == VPRecipeBase::VPReplicateSC ||
1127 R->getVPRecipeID() == VPRecipeBase::VPVectorEndPointerSC ||
1128 R->getVPRecipeID() == VPRecipeBase::VPVectorPointerSC;
1137 auto *R = V->getDefiningRecipe();
1190 llvm::find_if(Metadata, [Kind](
const std::pair<unsigned, MDNode *> &
P) {
1191 return P.first == Kind;
1193 if (It != Metadata.end())
1196 Metadata.emplace_back(Kind,
Node);
1206 find_if(Metadata, [Kind](
const auto &
P) {
return P.first == Kind; });
1207 return It != Metadata.end() ? It->second :
nullptr;
1210#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1230 Instruction::OtherOpsEnd + 1,
1351 bool doesGeneratePerAllLanes()
const;
1356 unsigned getNumOperandsForOpcode()
const;
1359 typedef unsigned char OpcodeTy;
1367 bool canGenerateScalarForFirstLane()
const;
1375 bool alwaysUnmasked()
const {
1381 if (!getUnderlyingValue())
1384 return Opcode == Instruction::PHI || Opcode == Instruction::GetElementPtr;
1388 VPInstruction(
unsigned Opcode, ArrayRef<VPValue *> Operands,
1389 const VPIRFlags &Flags = {},
const VPIRMetadata &MD = {},
1413#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1422 case Instruction::Ret:
1423 case Instruction::UncondBr:
1424 case Instruction::CondBr:
1425 case Instruction::Store:
1426 case Instruction::Switch:
1427 case Instruction::IndirectBr:
1428 case Instruction::Resume:
1429 case Instruction::CatchRet:
1430 case Instruction::Unreachable:
1431 case Instruction::Fence:
1432 case Instruction::AtomicRMW:
1446 if (NumOpsForOpcode == -1u)
1460 if (alwaysUnmasked())
1481 bool opcodeMayReadOrWriteFromMemory()
const;
1484 bool usesFirstLaneOnly(
const VPValue *
Op)
const override;
1487 bool usesFirstPartOnly(
const VPValue *
Op)
const override;
1491 bool isVectorToScalar()
const;
1495 bool isSingleScalar()
const;
1504#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1525 const Twine &Name =
"")
1527 ResultTy(ResultTy) {}
1532 if (R->isScalarCast())
1537 switch (VPI->getOpcode()) {
1541 case Instruction::Load:
1572#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1620 std::function<
const VPBasicBlock *(size_t)> GetBlock = [
this](
size_t Idx) {
1638#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1646 const Twine &Name =
"")
1651 return VPI && VPI->getOpcode() == Instruction::PHI;
1656 return VPI && VPI->getOpcode() == Instruction::PHI;
1661 return VPI && VPI->getOpcode() == Instruction::PHI;
1673#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1720 "Op must be an operand of the recipe");
1726 "Op must be an operand of the recipe");
1732 "Op must be an operand of the recipe");
1737#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1767#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1788 : VPRecipeWithIRFlags(VPRecipeBase::VPWidenSC, Operands,
Flags,
DL),
1790 setUnderlyingValue(&
I);
1796 : VPRecipeWithIRFlags(VPRecipeBase::VPWidenSC, Operands,
Flags,
DL),
1797 VPIRMetadata(
Metadata), Opcode(Opcode) {}
1821#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1830 "Op must be an operand of the recipe");
1831 return Opcode == Instruction::Select &&
Op ==
getOperand(0) &&
1832 Op->isDefinedOutsideLoopRegions();
1852 "Set flags not supported for the provided opcode");
1854 "Opcode requires specific flags to be set");
1881#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1897 bool MayReadFromMemory;
1900 bool MayWriteToMemory;
1903 bool MayHaveSideEffects;
1913 VPIRMetadata(MD), VectorIntrinsicID(VectorIntrinsicID), ResultTy(Ty),
1932 MayReadFromMemory = !ME.onlyWritesMemory();
1933 MayWriteToMemory = !ME.onlyReadsMemory();
1934 MayHaveSideEffects = MayWriteToMemory ||
1935 !Attrs.hasAttribute(Attribute::NoUnwind) ||
1936 !Attrs.hasAttribute(Attribute::WillReturn);
1944 operands(), ResultTy, *
this, *
this,
1980#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2000 : VPRecipeWithIRFlags(VPRecipeBase::VPWidenCallSC, CallArguments,
Flags,
2003 setUnderlyingValue(UV);
2005 isa<Function>(getOperand(getNumOperands() - 1)->getLiveInIRValue()) &&
2006 "last operand must be the called function");
2033#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2079#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2088 Type *SourceElementTy;
2090 bool isPointerLoopInvariant()
const {
2091 return getOperand(0)->isDefinedOutsideLoopRegions();
2094 bool isIndexLoopInvariant(
unsigned I)
const {
2095 return getOperand(
I + 1)->isDefinedOutsideLoopRegions();
2103 SourceElementTy(
GEP->getSourceElementType()) {
2104 setUnderlyingValue(
GEP);
2136 bool usesFirstLaneOnly(
const VPValue *
Op)
const override;
2139#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2152 Type *SourceElementTy;
2163 SourceElementTy(SourceElementTy), Stride(Stride) {
2164 assert(Stride < 0 &&
"Stride must be negative");
2185 "Op must be an operand of the recipe");
2199 "Op must be an operand of the recipe");
2209 VEPR->addOperand(
Offset);
2214#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2225 Type *SourceElementTy;
2231 SourceElementTy(SourceElementTy) {}
2245 "Op must be an operand of the recipe");
2252 "Op must be an operand of the recipe");
2261 Clone->addOperand(Off);
2273#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2313 return R->getVPRecipeID() >= VPRecipeBase::VPFirstHeaderPHISC &&
2314 R->getVPRecipeID() <= VPRecipeBase::VPLastHeaderPHISC;
2356#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2378 return R->getVPRecipeID() == VPRecipeBase::VPWidenIntOrFpInductionSC ||
2379 R->getVPRecipeID() == VPRecipeBase::VPWidenPointerInductionSC;
2383 auto *R = V->getDefiningRecipe();
2423 "VPWidenIntOrFpInductionRecipe generates its own backedge value");
2430 "VPWidenIntOrFpInductionRecipe generates its own backedge value");
2436 "Op must be an operand of the recipe");
2459 Start, Step, IndDesc,
DL),
2469 Start, Step, IndDesc,
DL),
2491 "expandVPWidenIntOrFpInductionRecipe");
2531#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2547 Start, Step, IndDesc,
DL) {
2564 "expandVPWidenPointerInduction");
2571#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2620#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2656 "Op must be an operand of the recipe");
2661#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2682 unsigned ScaleFactor) {
2683 assert((!Ordered || InLoop) &&
"Ordered implies in-loop");
2704 bool HasUsesOutsideReductionChain;
2711 bool HasUsesOutsideReductionChain =
false)
2713 VPIRFlags(Flags), Kind(Kind), Style(Style),
2714 HasUsesOutsideReductionChain(HasUsesOutsideReductionChain) {
2724 HasUsesOutsideReductionChain);
2735 auto *Partial = std::get_if<RdxUnordered>(&Style);
2736 return Partial ? Partial->VFScaleFactor : 1;
2742 assert(ScaleFactor > 1 &&
"must set to scale factor > 1");
2755 bool isOrdered()
const {
return std::holds_alternative<RdxOrdered>(Style); }
2759 return std::holds_alternative<RdxInLoop>(Style) ||
2760 std::holds_alternative<RdxOrdered>(Style);
2768 return HasUsesOutsideReductionChain;
2774 "Op must be an operand of the recipe");
2779#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2797 assert(Operands.size() >= 2 &&
"Expected at least two operands!");
2844 bool usesFirstLaneOnly(
const VPValue *
Op)
const override;
2847#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2865 bool HasMask =
false;
2869 bool NeedsMaskForGaps =
false;
2878 NeedsMaskForGaps(NeedsMaskForGaps) {
2880 assert((!Mask || !IG->isReverse()) &&
2881 "Reversed masked interleave-group not supported.");
2882 if (StoredValues.
empty()) {
2883 for (
unsigned I = 0;
I < IG->getFactor(); ++
I)
2885 assert(!Inst->getType()->isVoidTy() &&
"must have result");
2889 for (
auto *SV : StoredValues)
2902 return R->getVPRecipeID() == VPRecipeBase::VPInterleaveSC ||
2903 R->getVPRecipeID() == VPRecipeBase::VPInterleaveEVLSC;
2963 Mask, NeedsMaskForGaps, MD, DL) {}
2980 "Op must be an operand of the recipe");
2989#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3004 R.getStoredValues(), Mask, R.needsMaskForGaps(), R,
3006 assert(!getInterleaveGroup()->isReverse() &&
3007 "Reversed interleave-group with tail folding is not supported.");
3008 assert(!needsMaskForGaps() &&
"Interleaved access with gap mask is not "
3009 "supported for scalable vector.");
3029 "Op must be an operand of the recipe");
3039#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3055 bool IsConditional =
false;
3066 IsConditional =
true;
3077 {ChainOp, VecOp}, CondOp, Style,
DL) {}
3083 {ChainOp, VecOp}, CondOp, Style,
DL) {}
3094 return R->getVPRecipeID() == VPRecipeBase::VPReductionSC ||
3095 R->getVPRecipeID() == VPRecipeBase::VPReductionEVLSC;
3122 bool isOrdered()
const {
return std::holds_alternative<RdxOrdered>(Style); };
3129 return std::holds_alternative<RdxInLoop>(Style) ||
3130 std::holds_alternative<RdxOrdered>(Style);
3143 auto *Partial = std::get_if<RdxUnordered>(&Style);
3144 return Partial ? Partial->VFScaleFactor : 1;
3148#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3166 {R.getChainOp(), R.getVecOp(), &EVL}, CondOp,
3187 "Op must be an operand of the recipe");
3192#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3206 bool IsSingleScalar;
3213 bool IsSingleScalar,
VPValue *Mask =
nullptr,
3215 DebugLoc
DL = DebugLoc::getUnknown())
3216 : VPRecipeWithIRFlags(VPRecipeBase::VPReplicateSC, Operands, Flags,
DL),
3217 VPIRMetadata(
Metadata), IsSingleScalar(IsSingleScalar),
3218 IsPredicated(Mask) {
3219 setUnderlyingValue(
I);
3230 Copy->transferFlags(*
this);
3252 "Op must be an operand of the recipe");
3259 "Op must be an operand of the recipe");
3266 bool shouldPack()
const;
3277#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3304#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3308 O << Indent <<
"BRANCH-ON-MASK ";
3316 "Op must be an operand of the recipe");
3339 enum class ExpressionTypes {
3355 ExtNegatedMulAccReduction,
3359 ExpressionTypes ExpressionType;
3367 VPExpressionRecipe(ExpressionTypes ExpressionType,
3372 : VPExpressionRecipe(ExpressionTypes::ExtendedReduction, {Ext, Red}) {}
3374 : VPExpressionRecipe(ExpressionTypes::MulAccReduction, {
Mul, Red}) {}
3377 : VPExpressionRecipe(ExpressionTypes::ExtMulAccReduction,
3378 {Ext0, Ext1,
Mul, Red}) {}
3382 : VPExpressionRecipe(ExpressionTypes::ExtNegatedMulAccReduction,
3383 {Ext0, Ext1,
Mul,
Sub, Red}) {
3384 assert(
Mul->getOpcode() == Instruction::Mul &&
"Expected a mul");
3386 "Expected an add reduction");
3389 assert(SubConst && SubConst->isZero() &&
3390 Sub->getOpcode() == Instruction::Sub &&
"Expected a negating sub");
3395 for (
auto *R :
reverse(ExpressionRecipes)) {
3396 if (ExpressionRecipesSeen.
insert(R).second)
3399 for (
VPValue *
T : LiveInPlaceholders)
3406 assert(!ExpressionRecipes.empty() &&
"empty expressions should be removed");
3408 for (
auto *R : ExpressionRecipes)
3409 NewExpressiondRecipes.
push_back(R->clone());
3410 for (
auto *New : NewExpressiondRecipes) {
3411 for (
const auto &[Idx, Old] :
enumerate(ExpressionRecipes))
3412 New->replaceUsesOfWith(Old, NewExpressiondRecipes[Idx]);
3415 for (
const auto &[Placeholder, OutsideOp] :
3417 New->replaceUsesOfWith(Placeholder, OutsideOp);
3419 return new VPExpressionRecipe(ExpressionType, NewExpressiondRecipes);
3437 return PR ? PR->getVFScaleFactor() : 1;
3460#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3500 "Op must be an operand of the recipe");
3505#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3537 std::initializer_list<VPValue *> Operands,
3549 return R->getVPRecipeID() == VPRecipeBase::VPWidenLoadSC ||
3550 R->getVPRecipeID() == VPRecipeBase::VPWidenStoreSC ||
3551 R->getVPRecipeID() == VPRecipeBase::VPWidenLoadEVLSC ||
3552 R->getVPRecipeID() == VPRecipeBase::VPWidenStoreEVLSC;
3616 "Op must be an operand of the recipe");
3623#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3638 {Addr, &EVL}, L.isConsecutive(), L,
3659 "Op must be an operand of the recipe");
3666#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3680 {Addr, StoredVal}, Consecutive,
Metadata,
DL) {
3701 "Op must be an operand of the recipe");
3708#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3722 {Addr, StoredVal, &EVL}, S.isConsecutive(), S,
3745 "Op must be an operand of the recipe");
3757#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3779 llvm_unreachable(
"SCEV expressions must be expanded before final execute");
3792#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3822#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3849 "scalar phi recipe");
3862 "Op must be an operand of the recipe");
3867#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3907#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3933 Start, CanonicalIV, Step, Name) {}
3939 Kind(Kind), FPBinOp(FPBinOp), Name(Name.str()) {}
3969 "Op must be an operand of the recipe");
3974#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3997 InductionOpcode(Opcode) {}
4016 NewR->setStartIndex(StartIndex);
4055 "Op must be an operand of the recipe");
4062#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4082 switch (R->getVPRecipeID()) {
4083 case VPRecipeBase::VPInstructionSC:
4085 case VPRecipeBase::VPIRInstructionSC:
4087 case VPRecipeBase::VPWidenPHISC:
4106 CastInfo<VPPhiAccessors, VPRecipeBase *>> {};
4125 switch (R->getVPRecipeID()) {
4126 case VPRecipeBase::VPInstructionSC:
4128 case VPRecipeBase::VPWidenSC:
4130 case VPRecipeBase::VPWidenCastSC:
4132 case VPRecipeBase::VPWidenIntrinsicSC:
4134 case VPRecipeBase::VPWidenCallSC:
4136 case VPRecipeBase::VPReplicateSC:
4138 case VPRecipeBase::VPInterleaveSC:
4139 case VPRecipeBase::VPInterleaveEVLSC:
4141 case VPRecipeBase::VPWidenLoadSC:
4142 case VPRecipeBase::VPWidenLoadEVLSC:
4143 case VPRecipeBase::VPWidenStoreSC:
4144 case VPRecipeBase::VPWidenStoreEVLSC:
4163 CastInfo<VPIRMetadata, VPRecipeBase *>> {};
4230 return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC ||
4231 V->getVPBlockID() == VPBlockBase::VPIRBasicBlockSC;
4235 assert(Recipe &&
"No recipe to append.");
4236 assert(!Recipe->Parent &&
"Recipe already in VPlan");
4237 Recipe->Parent =
this;
4238 Recipes.insert(InsertPt, Recipe);
4268#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4285 bool isExiting()
const;
4295 const VPBasicBlock *getCFGPredecessor(
unsigned Idx)
const;
4311inline const VPBasicBlock *
4321class VPIRBasicBlock :
public VPBasicBlock {
4328 : VPBasicBlock(VPIRBasicBlockSC,
4336 return V->getVPBlockID() == VPBlockBase::VPIRBasicBlockSC;
4353 std::unique_ptr<VPRegionValue> CanIV;
4392 std::unique_ptr<VPCanonicalIVInfo> CanIVInfo;
4397 const std::string &Name =
"")
4398 :
VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exiting(Exiting) {
4400 assert(!Entry->hasPredecessors() &&
"Entry block has predecessors.");
4401 assert(Exiting &&
"Must also pass Exiting if Entry is passed.");
4402 assert(!Exiting->hasSuccessors() &&
"Exit block has successors.");
4403 Entry->setParent(
this);
4404 Exiting->setParent(
this);
4408 VPRegionBlock(Type *CanIVTy, DebugLoc
DL, VPBlockBase *Entry,
4409 VPBlockBase *Exiting,
const std::string &Name =
"")
4410 : VPRegionBlock(Entry, Exiting, Name) {
4411 CanIVInfo = std::make_unique<VPCanonicalIVInfo>(CanIVTy,
DL,
this);
4419 return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
4429 "Entry block cannot have predecessors.");
4441 "Exit block cannot have successors.");
4442 Exiting = ExitingBlock;
4463#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4481 void dissolveToCFGLoop();
4491 return CanIVInfo ? CanIVInfo->getRegionValue() :
nullptr;
4494 return CanIVInfo ? CanIVInfo->getRegionValue() :
nullptr;
4499 return CanIVInfo->getRegionValue()->getType();
4509 CanIVInfo->clearNUW();
4586 : Entry(Entry), ScalarHeader(ScalarHeader) {
4587 Entry->setPlan(
this);
4588 assert(ScalarHeader->getNumSuccessors() == 0 &&
4589 "scalar header must be a leaf node");
4647 "cannot call the function after vector loop region has been removed");
4678 assert(TripCount &&
"trip count needs to be set before accessing it");
4685 assert(!TripCount && NewTripCount &&
"TripCount should not be set yet.");
4686 TripCount = NewTripCount;
4693 "TripCount must be set when resetting");
4694 TripCount = NewTripCount;
4699 if (!BackedgeTakenCount)
4701 return BackedgeTakenCount;
4729 assert(
hasVF(VF) &&
"Cannot set VF not already in plan");
4736 assert(
hasVF(VF) &&
"tried to remove VF not present in plan");
4754 assert(VFs.size() == 1 &&
"expected plan with single VF");
4759 bool HasScalarVFOnly = VFs.size() == 1 && VFs[0].isScalar();
4761 "Plan with scalar VF should only have a single VF");
4762 return HasScalarVFOnly;
4765 bool hasUF(
unsigned UF)
const {
return UFs.empty() || UFs.contains(UF); }
4769 assert(UFs.size() == 1 &&
"Expected a single UF");
4774 assert(
hasUF(UF) &&
"Cannot set the UF not already in plan");
4791 assert(V &&
"Trying to get or add the VPIRValue of a null Value");
4792 auto [It, Inserted] = LiveIns.try_emplace(V);
4801 "Only VPIRValues should be in mapping");
4805 assert(V &&
"Trying to get or add the VPIRValue of a null VPIRValue");
4831 bool IsSigned =
false) {
4847#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4871 CreatedBlocks.push_back(VPB);
4880 const std::string &Name =
"",
4884 CreatedBlocks.push_back(VPB);
4892 const std::string &Name =
"") {
4894 CreatedBlocks.push_back(VPB);
4917 (ExitBlocks.size() == 1 && ExitBlocks[0]->getNumPredecessors() > 1);
4930#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
This file implements methods to test, set and extract typed bits from packed unsigned integers.
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
#define LLVM_ABI_FOR_TEST
#define LLVM_PACKED_START
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
This file implements a map that provides insertion order iteration.
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
static StringRef getName(Value *V)
static bool mayHaveSideEffects(MachineInstr &MI)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static const BasicSubtargetSubTypeKV * find(StringRef S, ArrayRef< BasicSubtargetSubTypeKV > A)
Find KV in array using binary search.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
This file contains the declarations of the entities induced by Vectorization Plans,...
#define VP_CLASSOF_IMPL(VPRecipeID)
static const uint32_t IV[8]
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
bool empty() const
empty - Check if the array is empty.
LLVM Basic Block Representation.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
A parsed version of the target data layout string in and methods for querying it.
static DebugLoc getUnknown()
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
static constexpr ElementCount getFixed(ScalarTy MinVal)
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags fromRaw(unsigned Flags)
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Common base class shared among various IRBuilders.
A struct for saving information about induction variables.
InductionKind
This enum represents the kinds of inductions that we support.
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
The group of interleaved loads/stores sharing the same stride and close to each other.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
Represents a single loop in the control flow graph.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
This class represents an analyzed expression in the program.
This class provides computation of slot numbers for LLVM Assembly writing.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
iterator erase(const_iterator CI)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
std::string str() const
str - Get the contents as an std::string.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
VPActiveLaneMaskPHIRecipe * clone() override
Clone the current recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPActiveLaneMaskPHIRecipe(VPValue *StartMask, DebugLoc DL)
~VPActiveLaneMaskPHIRecipe() override=default
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
RecipeListTy::const_iterator const_iterator
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
RecipeListTy::const_reverse_iterator const_reverse_iterator
RecipeListTy::iterator iterator
Instruction iterators...
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
iplist< VPRecipeBase > RecipeListTy
VPBasicBlock(const unsigned char BlockSC, const Twine &Name="")
iterator begin()
Recipe iterator methods.
RecipeListTy::reverse_iterator reverse_iterator
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
const VPBasicBlock * getCFGPredecessor(unsigned Idx) const
Returns the predecessor block at index Idx with the predecessors as per the corresponding plain CFG.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
const_reverse_iterator rbegin() const
RecipeListTy Recipes
The VPRecipes held in the order of output instructions to generate.
const VPRecipeBase & front() const
const_iterator begin() const
const VPRecipeBase & back() const
void insert(VPRecipeBase *Recipe, iterator InsertPt)
const_iterator end() const
static bool classof(const VPBlockBase *V)
Method to support type inquiry through isa, cast, and dyn_cast.
static RecipeListTy VPBasicBlock::* getSublistAccess(VPRecipeBase *)
Returns a pointer to a member of the recipe list.
reverse_iterator rbegin()
const_reverse_iterator rend() const
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
VPValue * getMask(unsigned Idx) const
Return mask number Idx.
VPBlendRecipe(PHINode *Phi, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL)
The blend operation is a User of the incoming values and of their respective masks,...
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
VPBlendRecipe * clone() override
Clone the current recipe.
void setMask(unsigned Idx, VPValue *V)
Set mask number Idx to V.
bool isNormalized() const
A normalized blend is one that has an odd number of operands, whereby the first operand does not have...
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
void setSuccessors(ArrayRef< VPBlockBase * > NewSuccs)
Set each VPBasicBlock in NewSuccs as successor of this VPBlockBase.
VPRegionBlock * getParent()
VPBlocksTy & getPredecessors()
iterator_range< VPBlockBase ** > predecessors()
LLVM_DUMP_METHOD void dump() const
Dump this VPBlockBase to dbgs().
void setName(const Twine &newName)
size_t getNumSuccessors() const
iterator_range< VPBlockBase ** > successors()
virtual void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const =0
Print plain-text dump of this VPBlockBase to O, prefixing all lines with Indent.
bool hasPredecessors() const
Returns true if this block has any predecessors.
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
void printSuccessors(raw_ostream &O, const Twine &Indent) const
Print the successors of this block to O, prefixing all lines with Indent.
SmallVectorImpl< VPBlockBase * > VPBlocksTy
virtual ~VPBlockBase()=default
const VPBlocksTy & getHierarchicalPredecessors()
unsigned getIndexForSuccessor(const VPBlockBase *Succ) const
Returns the index for Succ in the block's successor list.
size_t getNumPredecessors() const
void setPredecessors(ArrayRef< VPBlockBase * > NewPreds)
Set each VPBasicBlock in NewPreds as predecessor of this VPBlockBase.
VPBlockBase * getEnclosingBlockWithPredecessors()
unsigned getIndexForPredecessor(const VPBlockBase *Pred) const
Returns the index for Pred in the block's predecessor list.
bool hasSuccessors() const
Returns true if this block has any successors.
const VPBlocksTy & getPredecessors() const
virtual VPBlockBase * clone()=0
Clone the current block and its recipes without updating the operands of the cloned recipes,...
enum { VPRegionBlockSC, VPBasicBlockSC, VPIRBasicBlockSC } VPBlockTy
An enumeration for keeping track of the concrete subclass of VPBlockBase that are actually instantiat...
virtual InstructionCost cost(ElementCount VF, VPCostContext &Ctx)=0
Return the cost of the block.
void setPlan(VPlan *ParentPlan)
Sets the pointer of the plan containing the block.
const VPRegionBlock * getParent() const
const std::string & getName() const
void clearSuccessors()
Remove all the successors of this block.
VPBlockBase * getSingleHierarchicalSuccessor()
void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse)
Set two given VPBlockBases IfTrue and IfFalse to be the two successors of this VPBlockBase.
VPBlockBase * getSinglePredecessor() const
virtual void execute(VPTransformState *State)=0
The method which generates the output IR that correspond to this VPBlockBase, thereby "executing" the...
const VPBlocksTy & getHierarchicalSuccessors()
void clearPredecessors()
Remove all the predecessors of this block.
friend class VPBlockUtils
unsigned getVPBlockID() const
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
void swapPredecessors()
Swap predecessors of the block.
VPBlockBase(const unsigned char SC, const std::string &N)
VPBlocksTy & getSuccessors()
VPBlockBase * getEnclosingBlockWithSuccessors()
An Enclosing Block of a block B is any block containing B, including B itself.
void setOneSuccessor(VPBlockBase *Successor)
Set a given VPBlockBase Successor as the single successor of this VPBlockBase.
void setParent(VPRegionBlock *P)
VPBlockBase * getSingleHierarchicalPredecessor()
VPBlockBase * getSingleSuccessor() const
const VPBlocksTy & getSuccessors() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBranchOnMaskRecipe * clone() override
Clone the current recipe.
bool usesScalars(const VPValue *Op) const override
Returns true if the recipe uses scalars of operand Op.
VPBranchOnMaskRecipe(VPValue *BlockInMask, DebugLoc DL)
VPlan-based builder utility analogous to IRBuilder.
VPRegionValue * getRegionValue()
VPCanonicalIVInfo(Type *Ty, DebugLoc DL, VPRegionBlock *Region)
const VPRegionValue * getRegionValue() const
VPCurrentIterationPHIRecipe * clone() override
Clone the current recipe.
VPCurrentIterationPHIRecipe(VPValue *StartIV, DebugLoc DL)
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPCurrentIterationPHIRecipe.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi nodes.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
~VPCurrentIterationPHIRecipe() override=default
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPIRValue * getStartValue() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPDerivedIVRecipe.
VPValue * getStepValue() const
Type * getScalarType() const
VPDerivedIVRecipe * clone() override
Clone the current recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPIRValue *Start, VPValue *CanonicalIV, VPValue *Step, const Twine &Name="")
~VPDerivedIVRecipe() override=default
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPDerivedIVRecipe(InductionDescriptor::InductionKind Kind, const FPMathOperator *FPBinOp, VPIRValue *Start, VPValue *IV, VPValue *Step, const Twine &Name="")
Template specialization of the standard LLVM dominator tree utility for VPBlockBases.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPExpandSCEVRecipe.
VPExpandSCEVRecipe(const SCEV *Expr)
const SCEV * getSCEV() const
VPExpandSCEVRecipe * clone() override
Clone the current recipe.
~VPExpandSCEVRecipe() override=default
void execute(VPTransformState &State) override
Method for generating code, must not be called as this recipe is abstract.
VPValue * getOperandOfResultType() const
Return the VPValue to use to infer the result type of the recipe.
VPExpressionRecipe * clone() override
Clone the current recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
~VPExpressionRecipe() override
bool isSingleScalar() const
Returns true if the result of this VPExpressionRecipe is a single-scalar.
VPExpressionRecipe(VPWidenCastRecipe *Ext0, VPWidenCastRecipe *Ext1, VPWidenRecipe *Mul, VPWidenRecipe *Sub, VPReductionRecipe *Red)
VPExpressionRecipe(VPWidenCastRecipe *Ext, VPReductionRecipe *Red)
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
VPExpressionRecipe(VPWidenCastRecipe *Ext0, VPWidenCastRecipe *Ext1, VPWidenRecipe *Mul, VPReductionRecipe *Red)
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getVFScaleFactor() const
VPExpressionRecipe(VPWidenRecipe *Mul, VPReductionRecipe *Red)
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
VPHistogramRecipe * clone() override
Clone the current recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed uncond...
unsigned getOpcode() const
VPHistogramRecipe(unsigned Opcode, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
VP_CLASSOF_IMPL(VPRecipeBase::VPHistogramSC)
~VPHistogramRecipe() override=default
A special type of VPBasicBlock that wraps an existing IR basic block.
void execute(VPTransformState *State) override
The method which generates the output IR instructions that correspond to this VPBasicBlock,...
BasicBlock * getIRBasicBlock() const
static bool classof(const VPBlockBase *V)
~VPIRBasicBlock() override=default
VPIRBasicBlock * clone() override
Clone the current block and its recipes, without updating the operands of the cloned recipes.
Class to record and manage LLVM IR flags.
ReductionFlagsTy ReductionFlags
LLVM_ABI_FOR_TEST bool hasRequiredFlagsForOpcode(unsigned Opcode) const
Returns true if Opcode has its required flags set.
bool hasNoWrapFlags() const
VPIRFlags(RecurKind Kind, bool IsOrdered, bool IsInLoop, FastMathFlags FMFs)
LLVM_ABI_FOR_TEST bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
static VPIRFlags getDefaultFlags(unsigned Opcode)
Returns default flags for Opcode for opcodes that support it, asserts otherwise.
VPIRFlags(DisjointFlagsTy DisjointFlags)
VPIRFlags(WrapFlagsTy WrapFlags)
void printFlags(raw_ostream &O) const
VPIRFlags(CmpInst::Predicate Pred, FastMathFlags FMFs)
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
bool isReductionOrdered() const
CmpInst::Predicate getPredicate() const
WrapFlagsTy getNoWrapFlags() const
bool hasNonNegFlag() const
Returns true if the recipe has non-negative flag.
void transferFlags(VPIRFlags &Other)
bool hasNoSignedWrap() const
void intersectFlags(const VPIRFlags &Other)
Only keep flags also present in Other.
VPIRFlags(TruncFlagsTy TruncFlags)
VPIRFlags(FastMathFlags FMFs)
VPIRFlags(NonNegFlagsTy NonNegFlags)
VPIRFlags(CmpInst::Predicate Pred)
VPIRFlags(ExactFlagsTy ExactFlags)
GEPNoWrapFlags getGEPNoWrapFlags() const
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
DisjointFlagsTy DisjointFlags
void setPredicate(CmpInst::Predicate Pred)
bool hasNoUnsignedWrap() const
NonNegFlagsTy NonNegFlags
bool isReductionInLoop() const
void dropPoisonGeneratingFlags()
Drop all poison-generating flags.
void applyFlags(Instruction &I) const
Apply the IR flags to I.
VPIRFlags(GEPNoWrapFlags GEPFlags)
RecurKind getRecurKind() const
VPIRFlags(Instruction &I)
Instruction & getInstruction() const
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first part of operand Op.
~VPIRInstruction() override=default
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
VPIRInstruction * clone() override
Clone the current recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
static LLVM_ABI_FOR_TEST VPIRInstruction * create(Instruction &I)
Create a new VPIRPhi for I, if it is a PHINode, otherwise create a VPIRInstruction.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
bool usesScalars(const VPValue *Op) const override
Returns true if the VPUser uses scalars of operand Op.
VPIRInstruction(Instruction &I)
VPIRInstruction::create() should be used to create VPIRInstructions, as subclasses may need to be cre...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPInstructionWithType(unsigned Opcode, ArrayRef< VPValue * > Operands, Type *ResultTy, const VPIRFlags &Flags={}, const VPIRMetadata &Metadata={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
static bool classof(const VPUser *R)
static bool classof(const VPRecipeBase *R)
Type * getResultType() const
VPInstruction * clone() override
Clone the current recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
This is a concrete Recipe that models a single VPlan-level instruction.
unsigned getNumOperandsWithoutMask() const
Returns the number of operands, excluding the mask if the VPInstruction is masked.
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
VPInstruction * clone() override
Clone the current recipe.
@ ExtractLastActive
Extracts the last active lane from a set of vectors.
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
@ ExitingIVValue
Compute the exiting value of a wide induction after vectorization, that is the value of the last lane...
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
@ ExtractPenultimateElement
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
@ Unpack
Extracts all lanes from its (non-scalable) vector operand.
@ FirstOrderRecurrenceSplice
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
@ BuildVector
Creates a fixed-width vector containing all operands.
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
@ VScale
Returns the value for vscale.
@ CanonicalIVIncrementForPart
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
@ CalculateTripCountMinusVF
iterator_range< const_operand_iterator > operandsWithoutMask() const
void addMask(VPValue *Mask)
Add mask Mask to an unmasked VPInstruction, if it needs masking.
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
unsigned getOpcode() const
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
void setName(StringRef NewName)
Set the symbolic name for the VPInstruction.
VPValue * getMask() const
Returns the mask for the VPInstruction.
unsigned getNumOperandsForOpcode() const
Return the number of operands determined by the opcode of the VPInstruction, excluding mask.
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
virtual unsigned getNumStoreOperands() const =0
Returns the number of stored operands of this interleave group.
bool usesFirstLaneOnly(const VPValue *Op) const override=0
Returns true if the recipe only uses the first lane of operand Op.
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
static bool classof(const VPUser *U)
VPInterleaveBase(const unsigned char SC, const InterleaveGroup< Instruction > *IG, ArrayRef< VPValue * > Operands, ArrayRef< VPValue * > StoredValues, VPValue *Mask, bool NeedsMaskForGaps, const VPIRMetadata &MD, DebugLoc DL)
Instruction * getInsertPos() const
static bool classof(const VPRecipeBase *R)
const InterleaveGroup< Instruction > * getInterleaveGroup() const
VPValue * getMask() const
Return the mask used by this recipe.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
VPInterleaveBase * clone() override=0
Clone the current recipe.
VPValue * getAddr() const
Return the address accessed by this recipe.
A recipe for interleaved memory operations with vector-predication intrinsics.
bool usesFirstLaneOnly(const VPValue *Op) const override
The recipe only uses the first lane of the address, and EVL operand.
VPValue * getEVL() const
The VPValue of the explicit vector length.
~VPInterleaveEVLRecipe() override=default
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
VPInterleaveEVLRecipe * clone() override
Clone the current recipe.
VPInterleaveEVLRecipe(VPInterleaveRecipe &R, VPValue &EVL, VPValue *Mask)
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
~VPInterleaveRecipe() override=default
VPInterleaveRecipe * clone() override
Clone the current recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPInterleaveRecipe(const InterleaveGroup< Instruction > *IG, VPValue *Addr, ArrayRef< VPValue * > StoredValues, VPValue *Mask, bool NeedsMaskForGaps, const VPIRMetadata &MD, DebugLoc DL)
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
Helper type to provide functions to access incoming values and blocks for phi-like recipes.
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
VPValue * getIncomingValueForBlock(const VPBasicBlock *VPBB) const
Returns the incoming value for VPBB. VPBB must be an incoming block.
VPUser::const_operand_range incoming_values() const
Returns an iterator range over the incoming values.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, also number of incoming blocks.
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
virtual ~VPPhiAccessors()=default
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the recipe.
void setIncomingValueForBlock(const VPBasicBlock *VPBB, VPValue *V) const
Sets the incoming value for VPBB to V.
iterator_range< mapped_iterator< detail::index_iterator, std::function< const VPBasicBlock *(size_t)> > > const_incoming_blocks_range
const_incoming_blocks_range incoming_blocks() const
Returns an iterator range over the incoming blocks.
~VPPredInstPHIRecipe() override=default
bool usesScalars(const VPValue *Op) const override
Returns true if the recipe uses scalars of operand Op.
VPPredInstPHIRecipe * clone() override
Clone the current recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPPredInstPHIRecipe.
VPPredInstPHIRecipe(VPValue *PredV, DebugLoc DL)
Construct a VPPredInstPHIRecipe given PredInst whose value needs a phi nodes after merging back from ...
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayReadOrWriteMemory() const
Returns true if the recipe may read from or write to memory.
virtual void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const =0
Each concrete VPRecipe prints itself, without printing common information, like debug info or metadat...
VPRegionBlock * getRegion()
void setDebugLoc(DebugLoc NewDL)
Set the recipe's debug location to NewDL.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
~VPRecipeBase() override=default
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
virtual void execute(VPTransformState &State)=0
The method which generates the output IR instructions that correspond to this VPRecipe,...
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
static bool classof(const VPDef *D)
Method to support type inquiry through isa, cast, and dyn_cast.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
virtual VPRecipeBase * clone()=0
Clone the current recipe.
friend class VPBlockUtils
const VPBasicBlock * getParent() const
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the...
static bool classof(const VPUser *U)
enum { VPBranchOnMaskSC, VPDerivedIVSC, VPExpandSCEVSC, VPExpressionSC, VPIRInstructionSC, VPInstructionSC, VPInterleaveEVLSC, VPInterleaveSC, VPReductionEVLSC, VPReductionSC, VPReplicateSC, VPScalarIVStepsSC, VPVectorPointerSC, VPVectorEndPointerSC, VPWidenCallSC, VPWidenCanonicalIVSC, VPWidenCastSC, VPWidenGEPSC, VPWidenIntrinsicSC, VPWidenLoadEVLSC, VPWidenLoadSC, VPWidenStoreEVLSC, VPWidenStoreSC, VPWidenSC, VPBlendSC, VPHistogramSC, VPWidenPHISC, VPPredInstPHISC, VPCurrentIterationPHISC, VPActiveLaneMaskPHISC, VPFirstOrderRecurrencePHISC, VPWidenIntOrFpInductionSC, VPWidenPointerInductionSC, VPReductionPHISC, VPFirstPHISC=VPWidenPHISC, VPFirstHeaderPHISC=VPCurrentIterationPHISC, VPLastHeaderPHISC=VPReductionPHISC, VPLastPHISC=VPReductionPHISC, } VPRecipeTy
An enumeration for keeping track of the concrete subclass of VPRecipeBase that is actually instantiat...
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
unsigned getVPRecipeID() const
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos liv...
VPRecipeBase(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
A VPValue defined by a recipe that produces one or more values.
LLVM_ABI_FOR_TEST VPRecipeValue(VPRecipeBase *Def, Value *UV=nullptr)
VPValue * getEVL() const
The VPValue of the explicit vector length.
VPReductionEVLRecipe(VPReductionRecipe &R, VPValue &EVL, VPValue *CondOp, DebugLoc DL=DebugLoc::getUnknown())
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPReductionEVLRecipe * clone() override
Clone the current recipe.
~VPReductionEVLRecipe() override=default
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
void setVFScaleFactor(unsigned ScaleFactor)
Set the VFScaleFactor for this reduction phi.
VPReductionPHIRecipe * clone() override
Clone the current recipe.
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
~VPReductionPHIRecipe() override=default
bool hasUsesOutsideReductionChain() const
Returns true, if the phi is part of a multi-use reduction.
unsigned getNumIncoming() const override
Returns the number of incoming values, also number of incoming blocks.
VPReductionPHIRecipe(PHINode *Phi, RecurKind Kind, VPValue &Start, VPValue &BackedgeValue, ReductionStyle Style, const VPIRFlags &Flags, bool HasUsesOutsideReductionChain=false)
Create a new VPReductionPHIRecipe for the reduction Phi.
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isPartialReduction() const
Returns true if the reduction outputs a vector with a scaled down VF.
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
A recipe to represent inloop, ordered or partial reduction operations.
VPReductionRecipe(const unsigned char SC, RecurKind RdxKind, FastMathFlags FMFs, Instruction *I, ArrayRef< VPValue * > Operands, VPValue *CondOp, ReductionStyle Style, DebugLoc DL)
bool isConditional() const
Return true if the in-loop reduction is conditional.
static bool classof(const VPRecipeBase *R)
static bool classof(const VPSingleDefRecipe *R)
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
VPValue * getCondOp() const
The VPValue of the condition for the block.
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
VPReductionRecipe(RecurKind RdxKind, FastMathFlags FMFs, Instruction *I, VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp, ReductionStyle Style, DebugLoc DL=DebugLoc::getUnknown())
bool isOrdered() const
Return true if the in-loop reduction is ordered.
VPReductionRecipe(const RecurKind RdxKind, FastMathFlags FMFs, VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp, ReductionStyle Style, DebugLoc DL=DebugLoc::getUnknown())
bool isPartialReduction() const
Returns true if the reduction outputs a vector with a scaled down VF.
~VPReductionRecipe() override=default
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
bool isInLoop() const
Returns true if the reduction is in-loop.
VPReductionRecipe * clone() override
Clone the current recipe.
static bool classof(const VPUser *U)
static bool classof(const VPValue *VPV)
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
const VPBlockBase * getEntry() const
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
~VPRegionBlock() override=default
void setExiting(VPBlockBase *ExitingBlock)
Set ExitingBlock as the exiting VPBlockBase of this VPRegionBlock.
VPBlockBase * getExiting()
const VPRegionValue * getCanonicalIV() const
void setEntry(VPBlockBase *EntryBlock)
Set EntryBlock as the entry VPBlockBase of this VPRegionBlock.
Type * getCanonicalIVType() const
Return the type of the canonical IV for loop regions.
bool hasCanonicalIVNUW() const
Indicates if NUW is set for the canonical IV increment, for loop regions.
void clearCanonicalIVNUW(VPInstruction *Increment)
Unsets NUW for the canonical IV increment Increment, for loop regions.
VPRegionValue * getCanonicalIV()
Return the canonical induction variable of the region, null for replicating regions.
const VPBlockBase * getExiting() const
VPBasicBlock * getPreheaderVPBB()
Returns the pre-header VPBasicBlock of the loop region.
static bool classof(const VPBlockBase *V)
Method to support type inquiry through isa, cast, and dyn_cast.
VPValues defined by a VPRegionBlock, like the canonical IV.
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
bool isSingleScalar() const
VPReplicateRecipe(Instruction *I, ArrayRef< VPValue * > Operands, bool IsSingleScalar, VPValue *Mask=nullptr, const VPIRFlags &Flags={}, VPIRMetadata Metadata={}, DebugLoc DL=DebugLoc::getUnknown())
~VPReplicateRecipe() override=default
bool usesScalars(const VPValue *Op) const override
Returns true if the recipe uses scalars of operand Op.
bool isPredicated() const
VPReplicateRecipe * clone() override
Clone the current recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
unsigned getOpcode() const
VPValue * getMask()
Return the mask of a predicated VPReplicateRecipe.
Instruction::BinaryOps getInductionOpcode() const
VPValue * getStepValue() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPScalarIVStepsRecipe.
VPScalarIVStepsRecipe(const InductionDescriptor &IndDesc, VPValue *IV, VPValue *Step, VPValue *VF, DebugLoc DL=DebugLoc::getUnknown())
void setStartIndex(VPValue *StartIndex)
Set or add the StartIndex operand.
VPScalarIVStepsRecipe * clone() override
Clone the current recipe.
VPValue * getStartIndex() const
Return the StartIndex, or null if known to be zero, valid only after unrolling.
VPValue * getVFValue() const
Return the number of scalars to produce per unroll part, used to compute StartIndex during unrolling.
VPScalarIVStepsRecipe(VPValue *IV, VPValue *Step, VPValue *VF, Instruction::BinaryOps Opcode, FastMathFlags FMFs, DebugLoc DL)
~VPScalarIVStepsRecipe() override=default
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, Value *UV, DebugLoc DL=DebugLoc::getUnknown())
static bool classof(const VPValue *V)
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
static bool classof(const VPRecipeBase *R)
const Instruction * getUnderlyingInstr() const
static bool classof(const VPUser *U)
LLVM_ABI_FOR_TEST LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
VPSingleDefRecipe * clone() override=0
Clone the current recipe.
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
This class can be used to assign names to VPValues.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
operand_iterator op_end()
operand_iterator op_begin()
VPValue * getOperand(unsigned N) const
VPUser(ArrayRef< VPValue * > Operands)
iterator_range< const_operand_iterator > const_operand_range
iterator_range< operand_iterator > operand_range
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
friend class VPRecipeValue
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
void setUnderlyingValue(Value *Val)
unsigned getNumUsers() const
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
VPValue * getVFValue() const
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getSourceElementType() const
int64_t getStride() const
VPVectorEndPointerRecipe * clone() override
Clone the current recipe.
VPValue * getOffset() const
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
VPVectorEndPointerRecipe(VPValue *Ptr, VPValue *VF, Type *SourceElementTy, int64_t Stride, GEPNoWrapFlags GEPFlags, DebugLoc DL)
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPVectorPointerRecipe.
VPValue * getPointer() const
void materializeOffset(unsigned Part=0)
Adds the offset operand to the recipe.
Type * getSourceElementType() const
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
VPVectorPointerRecipe(VPValue *Ptr, Type *SourceElementTy, GEPNoWrapFlags GEPFlags, DebugLoc DL)
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPVectorPointerRecipe.
VPVectorPointerRecipe * clone() override
Clone the current recipe.
A recipe for widening Call instructions using library calls.
VPWidenCallRecipe(Value *UV, Function *Variant, ArrayRef< VPValue * > CallArguments, const VPIRFlags &Flags={}, const VPIRMetadata &Metadata={}, DebugLoc DL={})
const_operand_range args() const
VPWidenCallRecipe * clone() override
Clone the current recipe.
Function * getCalledScalarFunction() const
~VPWidenCallRecipe() override=default
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF,...
~VPWidenCanonicalIVRecipe() override=default
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPWidenCanonicalIVRecipe(VPRegionValue *CanonicalIV)
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCanonicalIVRecipe.
VPRegionValue * getCanonicalIV() const
Return the canonical IV being widened.
VPWidenCanonicalIVRecipe * clone() override
Clone the current recipe.
VPWidenCastRecipe is a recipe to create vector cast instructions.
Instruction::CastOps getOpcode() const
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce widened copies of the cast.
~VPWidenCastRecipe() override=default
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
VPWidenCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy, CastInst *CI=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &Metadata={}, DebugLoc DL=DebugLoc::getUnknown())
VPWidenCastRecipe * clone() override
Clone the current recipe.
unsigned getOpcode() const
This recipe generates a GEP instruction.
Type * getSourceElementType() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenGEPRecipe.
VPWidenGEPRecipe * clone() override
Clone the current recipe.
~VPWidenGEPRecipe() override=default
VPWidenGEPRecipe(GetElementPtrInst *GEP, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, DebugLoc DL=DebugLoc::getUnknown())
void execute(VPTransformState &State) override=0
Generate the phi nodes.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
static bool classof(const VPValue *V)
void setStepValue(VPValue *V)
Update the step value of the recipe.
VPValue * getBackedgeValue() override
Returns the incoming value from the loop backedge.
VPIRValue * getStartValue() const
Returns the start value of the induction.
unsigned getNumIncoming() const override
Returns the number of incoming values, also number of incoming blocks.
PHINode * getPHINode() const
Returns the underlying PHINode if one exists, or null otherwise.
VPWidenInductionRecipe(unsigned char Kind, PHINode *IV, VPValue *Start, VPValue *Step, const InductionDescriptor &IndDesc, DebugLoc DL)
VPValue * getStepValue()
Returns the step value of the induction.
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
VPRecipeBase & getBackedgeRecipe() override
Returns the backedge value as a recipe.
static bool classof(const VPRecipeBase *R)
const VPValue * getVFValue() const
static bool classof(const VPSingleDefRecipe *R)
const VPValue * getStepValue() const
VPIRValue * getStartValue() const
Returns the start value of the induction.
const TruncInst * getTruncInst() const
void execute(VPTransformState &State) override
Generate the phi nodes.
~VPWidenIntOrFpInductionRecipe() override=default
VPValue * getSplatVFValue() const
If the recipe has been unrolled, return the VPValue for the induction increment, otherwise return nul...
VPWidenIntOrFpInductionRecipe * clone() override
Clone the current recipe.
VPWidenIntOrFpInductionRecipe(PHINode *IV, VPIRValue *Start, VPValue *Step, VPValue *VF, const InductionDescriptor &IndDesc, const VPIRFlags &Flags, DebugLoc DL)
TruncInst * getTruncInst()
Returns the first defined value as TruncInst, if it is one or nullptr otherwise.
VPWidenIntOrFpInductionRecipe(PHINode *IV, VPIRValue *Start, VPValue *Step, VPValue *VF, const InductionDescriptor &IndDesc, TruncInst *Trunc, const VPIRFlags &Flags, DebugLoc DL)
VPValue * getLastUnrolledPartOperand()
Returns the VPValue representing the value of this induction at the last unrolled part,...
unsigned getNumIncoming() const override
Returns the number of incoming values, also number of incoming blocks.
Type * getScalarType() const
Returns the scalar type of the induction.
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
A recipe for widening vector intrinsics.
VPWidenIntrinsicRecipe(Intrinsic::ID VectorIntrinsicID, ArrayRef< VPValue * > CallArguments, Type *Ty, const VPIRFlags &Flags={}, const VPIRMetadata &Metadata={}, DebugLoc DL=DebugLoc::getUnknown())
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool mayReadFromMemory() const
Returns true if the intrinsic may read from memory.
StringRef getIntrinsicName() const
Return the name of the intrinsic as a string.
VPWidenIntrinsicRecipe(CallInst &CI, Intrinsic::ID VectorIntrinsicID, ArrayRef< VPValue * > CallArguments, Type *Ty, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown())
bool mayHaveSideEffects() const
Returns true if the intrinsic may have side-effects.
VPWidenIntrinsicRecipe * clone() override
Clone the current recipe.
bool mayWriteToMemory() const
Returns true if the intrinsic may write to memory.
~VPWidenIntrinsicRecipe() override=default
LLVM_ABI_FOR_TEST bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Type * getResultType() const
Return the scalar return type of the intrinsic.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
static bool classof(const VPUser *U)
void execute(VPTransformState &State) override
Generate the wide load/store.
VPWidenMemoryRecipe * clone() override
Clone the current recipe.
VPWidenMemoryRecipe(const char unsigned SC, Instruction &I, std::initializer_list< VPValue * > Operands, bool Consecutive, const VPIRMetadata &Metadata, DebugLoc DL)
Instruction & getIngredient() const
bool Consecutive
Whether the accessed addresses are consecutive.
static bool classof(const VPRecipeBase *R)
VPValue * getMask() const
Return the mask used by this recipe.
Align Alignment
Alignment information for this memory access.
bool isMasked() const
Returns true if the recipe is masked.
void setMask(VPValue *Mask)
Align getAlign() const
Returns the alignment of the memory access.
VPValue * getAddr() const
Return the address accessed by this recipe.
const VPRecipeBase * getAsRecipe() const override
Return a VPRecipeBase* to the current object.
VPWidenPHIRecipe(PHINode *Phi, VPValue *Start=nullptr, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create a new VPWidenPHIRecipe for Phi with start value Start and debug location DL.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenPHIRecipe.
VPWidenPHIRecipe * clone() override
Clone the current recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
~VPWidenPHIRecipe() override=default
void execute(VPTransformState &State) override
Generate the phi/select nodes.
VPWidenPointerInductionRecipe * clone() override
Clone the current recipe.
~VPWidenPointerInductionRecipe() override=default
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate vector values for the pointer induction.
VPWidenPointerInductionRecipe(PHINode *Phi, VPValue *Start, VPValue *Step, VPValue *NumUnrolledElems, const InductionDescriptor &IndDesc, DebugLoc DL)
Create a new VPWidenPointerInductionRecipe for Phi with start value Start and the number of elements ...
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
VPWidenRecipe * clone() override
Clone the current recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPWidenRecipe(Instruction &I, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &Metadata={}, DebugLoc DL={})
VPWidenRecipe(unsigned Opcode, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &Metadata={}, DebugLoc DL={})
~VPWidenRecipe() override=default
unsigned getOpcode() const
Class that maps (parts of) an existing VPlan to trees of combined VPInstructions.
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
VPIRValue * getLiveIn(Value *V) const
Return the live-in VPIRValue for V, if there is one or nullptr otherwise.
LLVM_ABI_FOR_TEST void printDOT(raw_ostream &O) const
Print this VPlan in DOT format to O.
friend class VPSlotTracker
std::string getName() const
Return a string with the name of the plan and the applicable VFs and UFs.
bool hasVF(ElementCount VF) const
ElementCount getSingleVF() const
Returns the single VF of the plan, asserting that the plan has exactly one VF.
const DataLayout & getDataLayout() const
LLVMContext & getContext() const
VPBasicBlock * getEntry()
void setName(const Twine &newName)
bool hasScalableVF() const
VPValue * getTripCount() const
The trip count of the original loop.
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
VPIRBasicBlock * getExitBlock(BasicBlock *IRBB) const
Return the VPIRBasicBlock corresponding to IRBB.
LLVM_ABI_FOR_TEST ~VPlan()
VPIRValue * getOrAddLiveIn(VPIRValue *V)
bool isExitBlock(VPBlockBase *VPBB)
Returns true if VPBB is an exit block.
const VPBasicBlock * getEntry() const
friend class VPlanPrinter
VPIRValue * getFalse()
Return a VPIRValue wrapping i1 false.
VPIRValue * getConstantInt(const APInt &Val)
Return a VPIRValue wrapping a ConstantInt with the given APInt value.
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
VPIRValue * getAllOnesValue(Type *Ty)
Return a VPIRValue wrapping the AllOnes value of type Ty.
VPRegionBlock * createReplicateRegion(VPBlockBase *Entry, VPBlockBase *Exiting, const std::string &Name="")
Create a new replicate region with Entry, Exiting and Name.
VPIRBasicBlock * createEmptyVPIRBasicBlock(BasicBlock *IRBB)
Create a VPIRBasicBlock wrapping IRBB, but do not create VPIRInstructions wrapping the instructions i...
auto getLiveIns() const
Return the list of live-in VPValues available in the VPlan.
bool hasUF(unsigned UF) const
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
VPSymbolicValue & getVectorTripCount()
The vector trip count.
VPValue * getBackedgeTakenCount() const
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
VPRegionBlock * createLoopRegion(Type *CanIVTy, DebugLoc DL, const std::string &Name="", VPBlockBase *Entry=nullptr, VPBlockBase *Exiting=nullptr)
Create a new loop region with a canonical IV using CanIVTy and DL.
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
void setVF(ElementCount VF)
bool isUnrolled() const
Returns true if the VPlan already has been unrolled, i.e.
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
unsigned getConcreteUF() const
Returns the concrete UF of the plan, after unrolling.
VPIRValue * getConstantInt(unsigned BitWidth, uint64_t Val, bool IsSigned=false)
Return a VPIRValue wrapping a ConstantInt with the given bitwidth and value.
const VPBasicBlock * getMiddleBlock() const
void setTripCount(VPValue *NewTripCount)
Set the trip count assuming it is currently null; if it is not - use resetTripCount().
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
void setEntry(VPBasicBlock *VPBB)
VPBasicBlock * createVPBasicBlock(const Twine &Name, VPRecipeBase *Recipe=nullptr)
Create a new VPBasicBlock with Name and containing Recipe if present.
LLVM_ABI_FOR_TEST VPIRBasicBlock * createVPIRBasicBlock(BasicBlock *IRBB)
Create a VPIRBasicBlock from IRBB containing VPIRInstructions for all instructions in IRBB,...
void removeVF(ElementCount VF)
Remove VF from the plan.
VPIRValue * getTrue()
Return a VPIRValue wrapping i1 true.
LLVM_DUMP_METHOD void dump() const
Dump the plan to stderr (for debugging).
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
bool hasScalarVFOnly() const
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
LLVM_ABI_FOR_TEST void print(raw_ostream &O) const
Print this VPlan to O.
void addVF(ElementCount VF)
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
void printLiveIns(raw_ostream &O) const
Print the live-ins of this VPlan to O.
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
const VPSymbolicValue & getVF() const
bool hasScalarTail() const
Returns true if the scalar tail may execute after the vector loop, i.e.
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
VPlan(BasicBlock *ScalarHeaderBB)
Construct a VPlan with a new VPBasicBlock as entry, a VPIRBasicBlock wrapping ScalarHeaderBB and a tr...
VPIRValue * getConstantInt(Type *Ty, uint64_t Val, bool IsSigned=false)
Return a VPIRValue wrapping a ConstantInt with the given type and value.
LLVM Value Representation.
ilist_node_with_parent()=default
Increasing range of size_t indices.
typename base_list_type::const_reverse_iterator const_reverse_iterator
typename base_list_type::reverse_iterator reverse_iterator
typename base_list_type::iterator iterator
typename base_list_type::const_iterator const_iterator
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file defines classes to implement an intrusive doubly linked list class (i.e.
This file defines the ilist_node class template, which is a convenient base class for creating classe...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
ReductionStyle getReductionStyle(bool InLoop, bool Ordered, unsigned ScaleFactor)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void getMetadataToPropagate(Instruction *Inst, SmallVectorImpl< std::pair< unsigned, MDNode * > > &Metadata)
Add metadata from Inst to Metadata, if it can be preserved after vectorization.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
auto cast_or_null(const Y &Val)
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
UncountableExitStyle
Different methods of handling early exits.
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
RecurKind
These are the kinds of recurrences that we support.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
std::variant< RdxOrdered, RdxInLoop, RdxUnordered > ReductionStyle
@ Increment
Incrementally increasing token ID.
std::unique_ptr< VPlan > VPlanPtr
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
static Bitfield::Type get(StorageType Packed)
Unpacks the field from the Packed value.
static void set(StorageType &Packed, typename Bitfield::Type Value)
Sets the typed value in the provided Packed value.
static VPPhiAccessors * castFailed()
Used by inherited doCastIfPossible to dyn_cast.
static VPPhiAccessors * doCast(VPRecipeBase *R)
Used by cast.
static bool isPossible(VPRecipeBase *R)
Used by isa.
This struct provides a method for customizing the way a cast is performed.
Provides a cast trait that strips const from types to make it easier to implement a const-version of ...
This cast trait just provides the default implementation of doCastIfPossible to make CastInfo special...
Provides a cast trait that uses a defined pointer to pointer cast as a base for reference-to-referenc...
This reduction is in-loop.
Possible variants of a reduction.
This reduction is unordered with the partial result scaled down by some factor.
A MapVector that performs no allocations if smaller than a certain size.
An overlay on VPIRValue for VPValues that wrap a ConstantInt.
Struct to hold various analysis needed for cost computations.
void execute(VPTransformState &State) override
Generate the phi nodes.
VPFirstOrderRecurrencePHIRecipe * clone() override
Clone the current recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start, VPValue &BackedgeValue)
DisjointFlagsTy(bool IsDisjoint)
NonNegFlagsTy(bool IsNonNeg)
TruncFlagsTy(bool HasNUW, bool HasNSW)
WrapFlagsTy(bool HasNUW, bool HasNSW)
static bool classof(const VPRecipeBase *U)
static bool classof(const VPUser *U)
const VPRecipeBase * getAsRecipe() const override
Return a VPRecipeBase* to the current object.
A VPValue representing a live-in from the input IR or a constant.
Type * getType() const
Returns the type of the underlying IR value.
static bool classof(const VPUser *U)
VPPhi(ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL, const Twine &Name="")
VPPhi * clone() override
Clone the current recipe.
const VPRecipeBase * getAsRecipe() const override
Return a VPRecipeBase* to the current object.
static bool classof(const VPSingleDefRecipe *SDR)
static bool classof(const VPValue *V)
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
static bool classof(const VPSingleDefRecipe *R)
static bool classof(const VPRecipeBase *R)
InstructionCost getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
VPRecipeWithIRFlags(const unsigned char SC, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL=DebugLoc::getUnknown())
static bool classof(const VPValue *V)
void execute(VPTransformState &State) override=0
The method which generates the output IR instructions that correspond to this VPRecipe,...
VPRecipeWithIRFlags * clone() override=0
Clone the current recipe.
static bool classof(const VPUser *U)
A symbolic live-in VPValue, used for values like vector trip count, VF, and VFxUF.
A recipe for widening load operations with vector-predication intrinsics, using the address to load f...
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide load or gather.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
VPWidenLoadEVLRecipe(VPWidenLoadRecipe &L, VPValue *Addr, VPValue &EVL, VPValue *Mask)
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
A recipe for widening load operations, using the address to load from and an optional mask.
VPWidenLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask, bool Consecutive, const VPIRMetadata &Metadata, DebugLoc DL)
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
void execute(VPTransformState &State) override
Generate a wide load or gather.
VPWidenLoadRecipe * clone() override
Clone the current recipe.
VP_CLASSOF_IMPL(VPRecipeBase::VPWidenLoadSC)
A recipe for widening store operations with vector-predication intrinsics, using the value to store,...
VPValue * getStoredValue() const
Return the value stored by this recipe.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide store or scatter.
VPWidenStoreEVLRecipe(VPWidenStoreRecipe &S, VPValue *Addr, VPValue *StoredVal, VPValue &EVL, VPValue *Mask)
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening store operations, using the stored value, the address to store to and an option...
VPWidenStoreRecipe(StoreInst &Store, VPValue *Addr, VPValue *StoredVal, VPValue *Mask, bool Consecutive, const VPIRMetadata &Metadata, DebugLoc DL)
VP_CLASSOF_IMPL(VPRecipeBase::VPWidenStoreSC)
VPValue * getStoredValue() const
Return the value stored by this recipe.
VPWidenStoreRecipe * clone() override
Clone the current recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.