#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME
  case VPInstructionSC: {
    if (VPI->getOpcode() == Instruction::Load)
    return VPI->opcodeMayReadOrWriteFromMemory();
  case VPInterleaveEVLSC:
  case VPWidenStoreEVLSC:
               ->getCalledScalarFunction()
  case VPWidenIntrinsicSC:
  case VPCanonicalIVPHISC:
  case VPBranchOnMaskSC:
  case VPFirstOrderRecurrencePHISC:
  case VPReductionPHISC:
  case VPScalarIVStepsSC:
  case VPReductionEVLSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenLoadEVLSC:
  case VPWidenPointerInductionSC:
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
  case VPInstructionSC:
  case VPWidenLoadEVLSC:
               ->mayReadFromMemory();
               ->getCalledScalarFunction()
               ->onlyWritesMemory();
  case VPWidenIntrinsicSC:
  case VPBranchOnMaskSC:
  case VPFirstOrderRecurrencePHISC:
  case VPPredInstPHISC:
  case VPScalarIVStepsSC:
  case VPWidenStoreEVLSC:
  case VPReductionEVLSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPointerInductionSC:
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
  case VPFirstOrderRecurrencePHISC:
  case VPPredInstPHISC:
  case VPVectorEndPointerSC:
  case VPInstructionSC: {
  case VPWidenCallSC: {
  case VPWidenIntrinsicSC:
  case VPReductionEVLSC:
  case VPScalarIVStepsSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPointerInductionSC:
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
  case VPInterleaveEVLSC:
  case VPWidenLoadEVLSC:
  case VPWidenStoreEVLSC:
           "mayHaveSideEffects result for ingredient differs from this "
  case VPReplicateSC: {
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  assert(!Parent && "Recipe already in some VPBasicBlock");
         "Insertion position not in any VPBasicBlock");
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(!Parent && "Recipe already in some VPBasicBlock");
         "Insertion position not in any VPBasicBlock");
    UI = IG->getInsertPos();
    UI = &WidenMem->getIngredient();
  if (UI && Ctx.skipCostComputation(UI, VF.isVector())) {
    dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
  assert(OpType == Other.OpType && "OpType must match");
  case OperationType::OverflowingBinOp:
    WrapFlags.HasNUW &= Other.WrapFlags.HasNUW;
    WrapFlags.HasNSW &= Other.WrapFlags.HasNSW;
  case OperationType::Trunc:
  case OperationType::DisjointOp:
  case OperationType::PossiblyExactOp:
    ExactFlags.IsExact &= Other.ExactFlags.IsExact;
  case OperationType::GEPOp:
  case OperationType::FPMathOp:
  case OperationType::FCmp:
    assert((OpType != OperationType::FCmp ||
            FCmpFlags.Pred == Other.FCmpFlags.Pred) &&
           "Cannot drop CmpPredicate");
    getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
    getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
  case OperationType::NonNegOp:
  case OperationType::Cmp:
  case OperationType::ReductionOp:
           "Cannot change RecurKind");
           "Cannot change IsOrdered");
           "Cannot change IsInLoop");
    getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
    getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
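    // Flag intersection is conservative: each flag is ANDed with the other
    // recipe's flag, so the merged recipe keeps only the guarantees that both
    // recipes provide.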
  case OperationType::Other:
  assert((OpType == OperationType::FPMathOp || OpType == OperationType::FCmp ||
          OpType == OperationType::ReductionOp) &&
         "recipe doesn't have fast math flags");
  const FastMathFlagsTy &F = getFMFsRef();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <unsigned PartOpIdx>
  if (U.getNumOperands() == PartOpIdx + 1)
    return U.getOperand(PartOpIdx);
template <unsigned PartOpIdx>
         "Set flags not supported for the provided opcode");
         "number of operands does not match opcode");
  case Instruction::Alloca:
  case Instruction::ExtractValue:
  case Instruction::Freeze:
  case Instruction::Load:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::ExtractElement:
  case Instruction::Store:
  case Instruction::Select:
  case Instruction::Call:
  case Instruction::GetElementPtr:
  case Instruction::PHI:
  case Instruction::Switch:
bool VPInstruction::canGenerateScalarForFirstLane() const {
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::PHI:
  case Instruction::Select:
  IRBuilderBase &Builder = State.Builder;
  case Instruction::ExtractElement: {
      return State.get(getOperand(0), VPLane(Idx->getZExtValue()));
  case Instruction::Freeze: {
  case Instruction::FCmp:
  case Instruction::ICmp: {
  case Instruction::PHI: {
  case Instruction::Select: {
                                 {VIVElem0, ScalarTC}, nullptr, Name);
    if (!V1->getType()->isVectorTy())
         "Requested vector length should be an integer.");
        Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
        {AVL, VFArg, Builder.getTrue()});
  assert(Part != 0 && "Must have a positive part");
  VPBasicBlock *SecondVPSucc =
    for (unsigned FieldIndex = 0; FieldIndex != StructTy->getNumElements();
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
                                       ReducedResult, "bin.rdx");
      return Builder.CreateSelect(ReducedResult, NewVal, Start, "rdx.select");
           "FindIV should use min/max reduction kinds");
    for (unsigned Part = 0; Part < NumOperandsToReduce; ++Part)
      IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Value *ReducedPartRdx = RdxParts[0];
      ReducedPartRdx = RdxParts[NumOperandsToReduce - 1];
      for (unsigned Part = 1; Part < NumOperandsToReduce; ++Part) {
        Value *RdxPart = RdxParts[Part];
          ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
              Builder.CreateBinOp(Opcode, RdxPart, ReducedPartRdx, "bin.rdx");
    return ReducedPartRdx;
           "invalid offset to extract from");
    assert(Offset <= 1 && "invalid offset to extract from");
           "can only generate first lane for PtrAdd");
           "simplified to ExtractElement.");
    Value *Res = nullptr;
        Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1));
    Value *VectorIdx = Idx == 1
                           ? LaneToExtract
                           : Builder.CreateSub(LaneToExtract, VectorStart);
    Value *Res = nullptr;
    for (int Idx = LastOpIdx; Idx >= 0; --Idx) {
      Value *TrailingZeros =
        Intrinsic::experimental_vector_extract_last_active, {VTy},
  Type *ScalarTy = Ctx.Types.inferScalarType(this);
  case Instruction::FNeg:
    return Ctx.TTI.getArithmeticInstrCost(Opcode, ResultTy, Ctx.CostKind);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
      RHSInfo = Ctx.getOperandInfo(RHS);
    return Ctx.TTI.getArithmeticInstrCost(
        Opcode, ResultTy, Ctx.CostKind,
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
        RHSInfo, Operands, CtxI, &Ctx.TLI);
  case Instruction::Freeze:
    return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, ResultTy,
  case Instruction::ExtractValue:
    return Ctx.TTI.getInsertExtractValueCost(Instruction::ExtractValue,
  case Instruction::ICmp:
  case Instruction::FCmp: {
    return Ctx.TTI.getCmpSelInstrCost(
        Ctx.CostKind, {TTI::OK_AnyValue, TTI::OP_None},
        {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
  case Instruction::BitCast: {
    Type *ScalarTy = Ctx.Types.inferScalarType(this);
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::PtrToAddr:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::AddrSpaceCast: {
    if (WidenMemoryRecipe == nullptr)
    if (!WidenMemoryRecipe->isConsecutive())
    if (WidenMemoryRecipe->isReverse())
    if (WidenMemoryRecipe->isMasked())
    if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
      if (R->getNumUsers() == 0 || R->hasMoreThanOneUniqueUser())
        CCH = ComputeCCH(Recipe);
    else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
             Opcode == Instruction::FPExt) {
        CCH = ComputeCCH(Recipe);
        Opcode, ResultTy, SrcTy, CCH, Ctx.CostKind,
  case Instruction::Select: {
        (IsLogicalAnd || IsLogicalOr)) {
      SmallVector<const Value *, 2> Operands;
                 [](VPValue *Op) { return Op->getUnderlyingValue(); }))
          IsLogicalOr ? Instruction::Or : Instruction::And, ResultTy,
          Ctx.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, SI);
    llvm::CmpPredicate Pred;
      Pred = Cmp->getPredicate();
        Instruction::Select, VectorTy, CondTy, Pred, Ctx.CostKind,
        {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, SI);
         "Should only generate a vector value or single scalar, not scalars "
  case Instruction::Select: {
    auto *CondTy = Ctx.Types.inferScalarType(getOperand(0));
    auto *VecTy = Ctx.Types.inferScalarType(getOperand(1));
    return Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, Pred,
  case Instruction::ExtractElement:
    return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy,
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
    return Ctx.TTI.getArithmeticReductionCost(
    return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
        {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
    return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
        {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
    Cost += Ctx.TTI.getArithmeticInstrCost(
        Instruction::Xor, PredTy, Ctx.CostKind,
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
        {TargetTransformInfo::OK_UniformConstantValue,
         TargetTransformInfo::OP_None});
    Cost += Ctx.TTI.getArithmeticInstrCost(
    Type *ScalarTy = Ctx.Types.inferScalarType(this);
        Intrinsic::experimental_vector_extract_last_active, ScalarTy,
        {VecTy, MaskTy, ScalarTy});
    return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind);
    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
                                    I32Ty, {Arg0Ty, I32Ty, I1Ty});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
    assert(VF.isVector() && "Reverse operation must be vector type");
                                  VectorTy, {}, Ctx.CostKind,
    return Ctx.TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
                                                    VecTy, Ctx.CostKind, 0);
         "unexpected VPInstruction with underlying value");
         getOpcode() == Instruction::ExtractElement ||
  case Instruction::PHI:
  assert(!State.Lane && "VPInstruction executing a Lane");
         "Set flags not supported for the provided opcode");
  Value *GeneratedValue = generate(State);
  assert(GeneratedValue && "generate must produce a value");
  bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
           !GeneratesPerFirstLaneOnly) ||
          State.VF.isScalar()) &&
         "scalar value but not only first lane defined");
  State.set(this, GeneratedValue, GeneratesPerFirstLaneOnly);
  case Instruction::GetElementPtr:
  case Instruction::ExtractElement:
  case Instruction::Freeze:
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::PHI:
  case Instruction::ExtractElement:
  case Instruction::PHI:
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Or:
  case Instruction::Freeze:
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
    O << "combined load";
    O << "combined store";
    O << "active lane mask";
    O << "EXPLICIT-VECTOR-LENGTH";
    O << "first-order splice";
    O << "branch-on-cond";
    O << "branch-on-two-conds";
    O << "TC > VF ? TC - VF : 0";
    O << "branch-on-count";
    O << "buildstructvector";
    O << "extract-lane";
    O << "extract-last-lane";
    O << "extract-last-part";
    O << "extract-penultimate-element";
    O << "compute-anyof-result";
    O << "compute-reduction-result";
    O << "first-active-lane";
    O << "last-active-lane";
    O << "reduction-start-vector";
    O << "resume-for-epilogue";
    O << "extract-last-active";
  State.set(this, Cast, VPLane(0));
    Value *VScale = State.Builder.CreateVScale(ResultTy);
    State.set(this, VScale, true);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
    O << "wide-iv-step ";
    O << "step-vector " << *ResultTy;
    O << "vscale " << *ResultTy;
  O << " to " << *ResultTy;
  PHINode *NewPhi = State.Builder.CreatePHI(
      State.TypeAnalysis.inferScalarType(this), 2, getName());
  for (unsigned Idx = 0; Idx != NumIncoming; ++Idx) {
  State.set(this, NewPhi, VPLane(0));
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1615 "PHINodes must be handled by VPIRPhi");
1618 State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
1631 "can only update exiting operands to phi nodes");
1642#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1645 O << Indent <<
"IR " << I;
1657 auto *PredVPBB = Pred->getExitingBasicBlock();
1658 BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
1665 if (Phi->getBasicBlockIndex(PredBB) == -1)
1666 Phi->addIncoming(V, PredBB);
1668 Phi->setIncomingValueForBlock(PredBB, V);
1673 State.Builder.SetInsertPoint(Phi->getParent(), std::next(Phi->getIterator()));
1678 assert(R->getNumOperands() == R->getParent()->getNumPredecessors() &&
1679 "Number of phi operands must match number of predecessors");
1680 unsigned Position = R->getParent()->getIndexForPredecessor(IncomingBlock);
1681 R->removeOperand(Position);
1684#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1698#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1704 O <<
" (extra operand" << (
getNumOperands() > 1 ?
"s" :
"") <<
": ";
1709 std::get<1>(
Op)->printAsOperand(O);
1717 for (
const auto &[Kind,
Node] : Metadata)
1718 I.setMetadata(Kind,
Node);
1723 for (
const auto &[KindA, MDA] : Metadata) {
1724 for (
const auto &[KindB, MDB] :
Other.Metadata) {
1725 if (KindA == KindB && MDA == MDB) {
1731 Metadata = std::move(MetadataIntersection);
1734#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1737 if (Metadata.empty() || !M)
1743 auto [Kind,
Node] = KindNodePair;
1745 "Unexpected unnamed metadata kind");
1746 O <<
"!" << MDNames[Kind] <<
" ";
  assert(State.VF.isVector() && "not widening");
  assert(Variant != nullptr && "Can't create vector function.");
      Arg = State.get(I.value(), VPLane(0));
    Args.push_back(Arg);
    CI->getOperandBundlesAsDefs(OpBundles);
  CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
  V->setCallingConv(Variant->getCallingConv());
  if (!V->getType()->isVoidTy())
  return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
                                  Variant->getFunctionType()->params(),
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-CALL ";
  O << " @" << CalledFn->getName() << "(";
    O << " (using library function";
    if (Variant->hasName())
      O << ": " << Variant->getName();
  assert(State.VF.isVector() && "not widening");
  for (auto [Idx, Ty] : enumerate(ContainedTys)) {
      Arg = State.get(I.value(), VPLane(0));
    Args.push_back(Arg);
  Module *M = State.Builder.GetInsertBlock()->getModule();
         "Can't retrieve vector intrinsic or vector-predication intrinsics.");
    CI->getOperandBundlesAsDefs(OpBundles);
  CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
  if (!V->getType()->isVoidTy())
  for (const auto &[Idx, Op] : enumerate(Operands)) {
    auto *V = Op->getUnderlyingValue();
      Arguments.push_back(UI->getArgOperand(Idx));
  Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
                       : Ctx.Types.inferScalarType(Op));
      R.hasFastMathFlags() ? R.getFastMathFlags() : FastMathFlags();
  return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
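// Intrinsic costs are driven by IntrinsicCostAttributes built from the
// recipe's inferred scalar operand types and fast-math flags, then handed to
// TTI above.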
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-INTRINSIC ";
  if (ResultTy->isVoidTy()) {
  Value *Mask = nullptr;
    Mask = State.get(VPMask);
        Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));
  if (Opcode == Instruction::Sub)
    IncAmt = Builder.CreateNeg(IncAmt);
    assert(Opcode == Instruction::Add && "only add or sub supported for now");
  State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
  Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
      Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy, Ctx.CostKind);
                              {PtrTy, IncTy, MaskTy});
  return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind) + MulCost +
         Ctx.TTI.getArithmeticInstrCost(Opcode, VTy, Ctx.CostKind);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-HISTOGRAM buckets: ";
  if (Opcode == Instruction::Sub)
  assert(Opcode == Instruction::Add);
VPIRFlags::FastMathFlagsTy::FastMathFlagsTy(const FastMathFlags &FMF) {
  case OperationType::OverflowingBinOp:
    return Opcode == Instruction::Add || Opcode == Instruction::Sub ||
           Opcode == Instruction::Mul || Opcode == Instruction::Shl ||
           Opcode == VPInstruction::CanonicalIVIncrementForPart;
  case OperationType::Trunc:
    return Opcode == Instruction::Trunc;
  case OperationType::DisjointOp:
    return Opcode == Instruction::Or;
  case OperationType::PossiblyExactOp:
    return Opcode == Instruction::AShr || Opcode == Instruction::LShr ||
           Opcode == Instruction::UDiv || Opcode == Instruction::SDiv;
  case OperationType::GEPOp:
    return Opcode == Instruction::GetElementPtr ||
  case OperationType::FPMathOp:
    return Opcode == Instruction::Call || Opcode == Instruction::FAdd ||
           Opcode == Instruction::FMul || Opcode == Instruction::FSub ||
           Opcode == Instruction::FNeg || Opcode == Instruction::FDiv ||
           Opcode == Instruction::FRem || Opcode == Instruction::FPExt ||
           Opcode == Instruction::FPTrunc || Opcode == Instruction::Select ||
  case OperationType::FCmp:
    return Opcode == Instruction::FCmp;
  case OperationType::NonNegOp:
    return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP;
  case OperationType::Cmp:
    return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
  case OperationType::ReductionOp:
  case OperationType::Other:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  case OperationType::Cmp:
  case OperationType::FCmp:
  case OperationType::DisjointOp:
  case OperationType::PossiblyExactOp:
  case OperationType::OverflowingBinOp:
  case OperationType::Trunc:
  case OperationType::FPMathOp:
  case OperationType::GEPOp:
    else if (GEPFlags.hasNoUnsignedSignedWrap())
  case OperationType::NonNegOp:
  case OperationType::ReductionOp: {
  case OperationType::Other:
2194 auto &Builder = State.Builder;
2196 case Instruction::Call:
2197 case Instruction::Br:
2198 case Instruction::PHI:
2199 case Instruction::GetElementPtr:
2201 case Instruction::UDiv:
2202 case Instruction::SDiv:
2203 case Instruction::SRem:
2204 case Instruction::URem:
2205 case Instruction::Add:
2206 case Instruction::FAdd:
2207 case Instruction::Sub:
2208 case Instruction::FSub:
2209 case Instruction::FNeg:
2210 case Instruction::Mul:
2211 case Instruction::FMul:
2212 case Instruction::FDiv:
2213 case Instruction::FRem:
2214 case Instruction::Shl:
2215 case Instruction::LShr:
2216 case Instruction::AShr:
2217 case Instruction::And:
2218 case Instruction::Or:
2219 case Instruction::Xor: {
2223 Ops.push_back(State.get(VPOp));
2225 Value *V = Builder.CreateNAryOp(Opcode,
Ops);
2236 case Instruction::ExtractValue: {
2239 Value *Extract = Builder.CreateExtractValue(
2241 State.set(
this, Extract);
2244 case Instruction::Freeze: {
2246 Value *Freeze = Builder.CreateFreeze(
Op);
2247 State.set(
this, Freeze);
2250 case Instruction::ICmp:
2251 case Instruction::FCmp: {
2253 bool FCmp = Opcode == Instruction::FCmp;
2269 case Instruction::Select: {
2274 Value *Sel = State.Builder.CreateSelect(
Cond, Op0, Op1);
2275 State.set(
this, Sel);
2294 State.get(
this)->getType() &&
2295 "inferred type and type from generated instructions do not match");
2302 case Instruction::UDiv:
2303 case Instruction::SDiv:
2304 case Instruction::SRem:
2305 case Instruction::URem:
2310 case Instruction::FNeg:
2311 case Instruction::Add:
2312 case Instruction::FAdd:
2313 case Instruction::Sub:
2314 case Instruction::FSub:
2315 case Instruction::Mul:
2316 case Instruction::FMul:
2317 case Instruction::FDiv:
2318 case Instruction::FRem:
2319 case Instruction::Shl:
2320 case Instruction::LShr:
2321 case Instruction::AShr:
2322 case Instruction::And:
2323 case Instruction::Or:
2324 case Instruction::Xor:
2325 case Instruction::Freeze:
2326 case Instruction::ExtractValue:
2327 case Instruction::ICmp:
2328 case Instruction::FCmp:
2329 case Instruction::Select:
2336#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2339 O << Indent <<
"WIDEN ";
2348 auto &Builder = State.Builder;
2350 assert(State.VF.isVector() &&
"Not vectorizing?");
2355 State.set(
this, Cast);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-CAST ";
  return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << " = WIDEN-INDUCTION";
    O << " (truncated to " << *TI->getType() << ")";
  return StartC && StartC->isZero() && StepC && StepC->isOne() &&
  assert(!State.Lane && "VPDerivedIVRecipe being replicated.");
    State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags());
  State.set(this, DerivedIV, VPLane(0));
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << " = DERIVED-IV ";
  assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
    AddOp = InductionOpcode;
    MulOp = Instruction::FMul;
  unsigned StartLane = 0;
  unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
    StartLane = State.Lane->getKnownLane();
    EndLane = StartLane + 1;
  for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
        ? ConstantInt::get(BaseIVTy, Lane, false,
        : ConstantFP::get(BaseIVTy, Lane);
    Value *StartIdx = Builder.CreateBinOp(AddOp, StartIdx0, LaneValue);
           "Expected StartIdx to be folded to a constant when VF is not "
    auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
    auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
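    // Each scalar lane value is BaseIV + StartIdx * Step, with integer or
    // floating-point add/mul chosen from the induction opcode above.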
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << " = SCALAR-STEPS ";
  assert(State.VF.isVector() && "not widening");
    return Op->isDefinedOutsideLoopRegions();
  if (AllOperandsAreInvariant) {
    Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
    State.set(this, Splat);
    auto *Ptr = State.get(getOperand(0), isPointerLoopInvariant());
      Indices.push_back(State.get(Operand, isIndexLoopInvariant(I - 1)));
    assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
           "NewGEP is not a pointer vector");
    State.set(this, NewGEP);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-GEP ";
  O << (isPointerLoopInvariant() ? "Inv" : "Var");
    O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
  O << " = getelementptr";
  auto &Builder = State.Builder;
  const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
  Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this));
  if (IndexTy != RunTimeVF->getType())
    RunTimeVF = Builder.CreateZExtOrTrunc(RunTimeVF, IndexTy);
  Value *NumElt = Builder.CreateMul(
  Value *LastLane = Builder.CreateSub(RunTimeVF, ConstantInt::get(IndexTy, 1));
  ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "",
  State.set(this, ResultPtr, true);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << " = vector-end-pointer";
  auto &Builder = State.Builder;
         "Expected prior simplification of recipe without offset");
  State.set(this, ResultPtr, true);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << " = vector-pointer";
  Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
      Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "BLEND ";
  assert(!State.Lane && "Reduction being replicated.");
         "In-loop AnyOf reductions aren't currently supported");
  Value *NewCond = State.get(Cond, State.VF.isScalar());
    if (State.VF.isVector())
      Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
    Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
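    // For a conditional reduction, inactive lanes are replaced with the start
    // value via the select above, so they do not perturb the reduced result.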
    if (State.VF.isVector())
    NewRed = State.Builder.CreateBinOp(
        PrevInChain, NewVecOp);
    PrevInChain = NewRed;
    NextInChain = NewRed;
    NewRed = State.Builder.CreateIntrinsic(
        PrevInChain->getType(), Intrinsic::vector_partial_reduce_add,
        {PrevInChain, NewVecOp}, nullptr, "partial.reduce");
    PrevInChain = NewRed;
    NextInChain = NewRed;
         "The reduction must either be ordered, partial or in-loop");
      NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
      NextInChain = State.Builder.CreateBinOp(
          PrevInChain, NewRed);
  assert(!State.Lane && "Reduction being replicated.");
  auto &Builder = State.Builder;
    Mask = State.get(CondOp);
    Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
    NewRed = Builder.CreateBinOp(
  State.set(this, NewRed, true);
  Type *ElementTy = Ctx.Types.inferScalarType(this);
  std::optional<FastMathFlags> OptionalFMF =
      CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VectorTy,
                                            CondTy, Pred, Ctx.CostKind);
    return CondCost + Ctx.TTI.getPartialReductionCost(
                          Opcode, ElementTy, ElementTy, ElementTy, VF,
         "Any-of reduction not implemented in VPlan-based cost model currently.");
    return Ctx.TTI.getMinMaxReductionCost(Id, VectorTy, FMFs, Ctx.CostKind);
  return Ctx.TTI.getArithmeticReductionCost(Opcode, VectorTy, OptionalFMF,
VPExpressionRecipe::VPExpressionRecipe(
    ExpressionTypes ExpressionType,
      ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) {
  assert(!ExpressionRecipes.empty() && "Nothing to combine?");
         "expression cannot contain recipes with side-effects");
  for (auto *R : ExpressionRecipes)
    ExpressionRecipesAsSetOfUsers.insert(R);
    if (R != ExpressionRecipes.back() &&
        any_of(R->users(), [&ExpressionRecipesAsSetOfUsers](VPUser *U) {
          return !ExpressionRecipesAsSetOfUsers.contains(U);
      R->replaceUsesWithIf(CopyForExtUsers, [&ExpressionRecipesAsSetOfUsers](
          return !ExpressionRecipesAsSetOfUsers.contains(&U);
    R->removeFromParent();
  for (auto *R : ExpressionRecipes) {
    for (const auto &[Idx, Op] : enumerate(R->operands())) {
      auto *Def = Op->getDefiningRecipe();
      if (Def && ExpressionRecipesAsSetOfUsers.contains(Def))
  for (auto *R : ExpressionRecipes)
    for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders))
      R->replaceUsesOfWith(LiveIn, Tmp);
  for (auto *R : ExpressionRecipes)
    if (!R->getParent())
      R->insertBefore(this);
    LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
  ExpressionRecipes.clear();
  Type *RedTy = Ctx.Types.inferScalarType(this);
         "VPExpressionRecipe only supports integer types currently.");
  switch (ExpressionType) {
  case ExpressionTypes::ExtendedReduction: {
               ->isPartialReduction()
               ? Ctx.TTI.getPartialReductionCost(
                     Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr,
               : Ctx.TTI.getExtendedReductionCost(
                     Opcode, ExtR->getOpcode() == Instruction::ZExt, RedTy,
                     SrcVecTy, std::nullopt, Ctx.CostKind);
  case ExpressionTypes::MulAccReduction:
    return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
  case ExpressionTypes::ExtNegatedMulAccReduction:
    assert(Opcode == Instruction::Add && "Unexpected opcode");
    Opcode = Instruction::Sub;
  case ExpressionTypes::ExtMulAccReduction: {
    if (RedR->isPartialReduction()) {
      return Ctx.TTI.getPartialReductionCost(
          Opcode, Ctx.Types.inferScalarType(getOperand(0)),
          Ctx.Types.inferScalarType(getOperand(1)), RedTy, VF,
              Ext0R->getOpcode()),
              Ext1R->getOpcode()),
          Mul->getOpcode(), Ctx.CostKind);
    return Ctx.TTI.getMulAccReductionCost(
        Opcode, RedTy, SrcVecTy, Ctx.CostKind);
    return R->mayReadFromMemory() || R->mayWriteToMemory();
         "expression cannot contain recipes with side-effects");
    return RR && !RR->isPartialReduction();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EXPRESSION ";
  switch (ExpressionType) {
  case ExpressionTypes::ExtendedReduction: {
    O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
      << *Ext0->getResultType();
    if (Red->isConditional()) {
  case ExpressionTypes::ExtNegatedMulAccReduction: {
    O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
      << *Ext0->getResultType() << "), (";
      << *Ext1->getResultType() << ")";
    if (Red->isConditional()) {
  case ExpressionTypes::MulAccReduction:
  case ExpressionTypes::ExtMulAccReduction: {
    O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
    bool IsExtended = ExpressionType == ExpressionTypes::ExtMulAccReduction;
                                    : ExpressionRecipes[0]);
        << *Ext0->getResultType() << "), (";
        << *Ext1->getResultType() << ")";
    if (Red->isConditional()) {
  O << Indent << "PARTIAL-REDUCE ";
  O << Indent << "REDUCE ";
  O << Indent << "REDUCE ";
  assert((!Instr->getType()->isAggregateType() ||
         "Expected vectorizable or non-aggregate type.");
  bool IsVoidRetTy = Instr->getType()->isVoidTy();
  Cloned->setName(Instr->getName() + ".cloned");
  Type *ResultTy = State.TypeAnalysis.inferScalarType(RepRecipe);
  if (ResultTy != Cloned->getType())
  State.setDebugLocFrom(DL);
    auto InputLane = Lane;
    Cloned->setOperand(I.index(), State.get(Operand, InputLane));
  State.Builder.Insert(Cloned);
  State.set(RepRecipe, Cloned, Lane);
    State.AC->registerAssumption(II);
                 [](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
         "Expected a recipe is either within a region or all of its operands "
         "are defined outside the vectorized region.");
  assert(IsSingleScalar && "VPReplicateRecipes outside replicate regions "
                           "must have already been unrolled");
         "uniform recipe shouldn't be predicated");
  assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
      State.Lane->isFirstLane()
  State.set(this, State.packScalarIntoVectorizedValue(this, WideValue,
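// The worklist below walks the transitive users of an address value to decide
// whether it feeds the address operand of another load or store (directly,
// through a blend, or through a consecutive widened memory recipe).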
  while (!WorkList.empty()) {
    if (!Cur || !Seen.insert(Cur).second)
          return Seen.contains(
              Blend->getIncomingValue(I)->getDefiningRecipe());
    for (VPUser *U : Cur->users()) {
        if (InterleaveR->getAddr() == Cur)
        if (RepR->getOpcode() == Instruction::Load &&
            RepR->getOperand(0) == Cur)
        if (RepR->getOpcode() == Instruction::Store &&
            RepR->getOperand(1) == Cur)
        if (MemR->getAddr() == Cur && MemR->isConsecutive())
    Ctx.SkipCostComputation.insert(UI);
  case Instruction::Alloca:
    return Ctx.TTI.getArithmeticInstrCost(
        Instruction::Mul, Ctx.Types.inferScalarType(this), Ctx.CostKind);
  case Instruction::GetElementPtr:
  case Instruction::Call: {
    for (const VPValue *ArgOp : ArgOps)
      Tys.push_back(Ctx.Types.inferScalarType(ArgOp));
    if (CalledFn->isIntrinsic())
      switch (CalledFn->getIntrinsicID()) {
      case Intrinsic::assume:
      case Intrinsic::lifetime_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::sideeffect:
      case Intrinsic::pseudoprobe:
      case Intrinsic::experimental_noalias_scope_decl: {
               "scalarizing intrinsic should be free");
    Type *ResultTy = Ctx.Types.inferScalarType(this);
        Ctx.TTI.getCallInstrCost(CalledFn, ResultTy, Tys, Ctx.CostKind);
    if (CalledFn->isIntrinsic())
      ScalarCallCost = std::min(
      return ScalarCallCost;
           Ctx.getScalarizationOverhead(ResultTy, ArgOps, VF);
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem: {
      return Ctx.skipCostComputation(
                 PredR->getOperand(0)->getUnderlyingValue()),
        Ctx.getScalarizationOverhead(Ctx.Types.inferScalarType(this),
        Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
    ScalarCost /= Ctx.getPredBlockCostDivisor(UI->getParent());
  case Instruction::Load:
  case Instruction::Store: {
    bool IsLoad = UI->getOpcode() == Instruction::Load;
    Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
    Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
        UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
    bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
    bool UsedByLoadStoreAddress =
        Ctx.TTI.getAddressComputationCost(
            PtrTy, UsedByLoadStoreAddress ? nullptr : Ctx.PSE.getSE(), PtrSCEV,
    if (!UsedByLoadStoreAddress) {
      bool EfficientVectorLoadStore =
          Ctx.TTI.supportsEfficientVectorElementLoadStore();
      if (!(IsLoad && !PreferVectorizedAddressing) &&
          !(!IsLoad && EfficientVectorLoadStore))
      if (!EfficientVectorLoadStore)
        ResultTy = Ctx.Types.inferScalarType(this);
           Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true);
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::PtrToAddr:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::AddrSpaceCast: {
  case Instruction::ExtractValue:
  case Instruction::InsertValue:
    return Ctx.TTI.getInsertExtractValueCost(getOpcode(), Ctx.CostKind);
  return Ctx.getLegacyCost(UI, VF);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << (IsSingleScalar ? "CLONE " : "REPLICATE ");
    O << "@" << CB->getCalledFunction()->getName() << "(";
  assert(State.Lane && "Branch on Mask works only on single instance.");
    Value *ConditionBit = State.get(BlockInMask, *State.Lane);
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
         "Expected to replace unreachable terminator with conditional branch.");
      State.Builder.CreateCondBr(ConditionBit, State.CFG.PrevBB, nullptr);
  CondBr->setSuccessor(0, nullptr);
  CurrentTerminator->eraseFromParent();
  assert(State.Lane && "Predicated instruction PHI works per instance.");
  assert(PredicatingBB && "Predicated block has no single predecessor.");
         "operand must be VPReplicateRecipe");
           "Packed operands must generate an insertelement or insertvalue");
      for (unsigned I = 0; I < StructTy->getNumContainedTypes() - 1; I++)
    PHINode *VPhi = State.Builder.CreatePHI(VecI->getType(), 2);
    VPhi->addIncoming(VecI->getOperand(0), PredicatingBB);
    if (State.hasVectorValue(this))
      State.reset(this, VPhi);
      State.set(this, VPhi);
    Type *PredInstType = State.TypeAnalysis.inferScalarType(getOperand(0));
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Lane))
      State.reset(this, Phi, *State.Lane);
      State.set(this, Phi, *State.Lane);
      State.reset(getOperand(0), Phi, *State.Lane);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "PHI-PREDICATED-INSTRUCTION ";
                    ->getAddressSpace();
                    : Instruction::Store;
         "Inconsecutive memory access should not have the order.");
                      : Intrinsic::vp_scatter;
    return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
           Ctx.TTI.getMemIntrinsicInstrCost(
                      : Intrinsic::masked_store;
    Cost += Ctx.TTI.getMemIntrinsicInstrCost(
    Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
  auto &Builder = State.Builder;
  Value *Mask = nullptr;
  if (auto *VPMask = getMask()) {
    Mask = State.get(VPMask);
      Mask = Builder.CreateVectorReverse(Mask, "reverse");
    NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
                                       "wide.masked.gather");
        Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
    NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
  State.set(this, NewLI);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN ";
  Value *AllTrueMask =
      Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
  return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
                                 {Operand, AllTrueMask, EVL}, nullptr, Name);
  auto &Builder = State.Builder;
  Value *Mask = nullptr;
    Mask = State.get(VPMask);
    Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
        Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
                                nullptr, "wide.masked.gather");
    NewLI = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
                                    {Addr, Mask, EVL}, nullptr, "vp.op.load");
  State.set(this, Res);
                    ->getAddressSpace();
  return Ctx.TTI.getMemIntrinsicInstrCost(
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN ";
  auto &Builder = State.Builder;
  Value *Mask = nullptr;
  if (auto *VPMask = getMask()) {
    Mask = State.get(VPMask);
      Mask = Builder.CreateVectorReverse(Mask, "reverse");
  Value *StoredVal = State.get(StoredVPValue);
    NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
    NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
    NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN store ";
  auto &Builder = State.Builder;
  Value *StoredVal = State.get(StoredValue);
  Value *Mask = nullptr;
    Mask = State.get(VPMask);
    Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
  if (CreateScatter) {
                                    Intrinsic::vp_scatter,
                                    {StoredVal, Addr, Mask, EVL});
                                    Intrinsic::vp_store,
                                    {StoredVal, Addr, Mask, EVL});
                    ->getAddressSpace();
  return Ctx.TTI.getMemIntrinsicInstrCost(
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN vp.store ";
  auto VF = DstVTy->getElementCount();
  assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");
    return Builder.CreateBitOrPointerCast(V, DstVTy);
         "Only one type should be a pointer type");
         "Only one type should be a floating point type");
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
                                 const Twine &Name) {
  unsigned Factor = Vals.size();
  assert(Factor > 1 && "Tried to interleave invalid number of vectors");
  for (Value *Val : Vals)
    assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
  if (VecTy->isScalableTy()) {
    assert(Factor <= 8 && "Unsupported interleave factor for scalable vectors");
    return Builder.CreateVectorInterleave(Vals, Name);
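  // Scalable vectors are interleaved via the vector.interleave intrinsics
  // above (factors up to 8); fixed-width vectors fall back to a single
  // shufflevector with an interleaving mask below.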
  const unsigned NumElts = VecTy->getElementCount().getFixedValue();
  return Builder.CreateShuffleVector(
  assert(!State.Lane && "Interleave group being replicated.");
         "Masking gaps for scalable vectors is not yet supported.");
  unsigned InterleaveFactor = Group->getFactor();
  auto CreateGroupMask = [&BlockInMask, &State,
                          &InterleaveFactor](Value *MaskForGaps) -> Value * {
    if (State.VF.isScalable()) {
      assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
      assert(InterleaveFactor <= 8 &&
             "Unsupported deinterleave factor for scalable vectors");
      auto *ResBlockInMask = State.get(BlockInMask);
    Value *ResBlockInMask = State.get(BlockInMask);
    Value *ShuffledMask = State.Builder.CreateShuffleVector(
        "interleaved.mask");
    return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
                                                   ShuffledMask, MaskForGaps)
  const DataLayout &DL = Instr->getDataLayout();
    Value *MaskForGaps = nullptr;
      assert(MaskForGaps && "Mask for Gaps is required but it is null");
    if (BlockInMask || MaskForGaps) {
      Value *GroupMask = CreateGroupMask(MaskForGaps);
      NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
                                               PoisonVec, "wide.masked.vec");
      NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
    if (VecTy->isScalableTy()) {
      assert(InterleaveFactor <= 8 &&
             "Unsupported deinterleave factor for scalable vectors");
      NewLoad = State.Builder.CreateIntrinsic(
          nullptr, "strided.vec");
    auto CreateStridedVector = [&InterleaveFactor, &State,
                                &NewLoad](unsigned Index) -> Value * {
      assert(Index < InterleaveFactor && "Illegal group index");
      if (State.VF.isScalable())
        return State.Builder.CreateExtractValue(NewLoad, Index);
      return State.Builder.CreateShuffleVector(NewLoad, StrideMask,
    for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
      Value *StridedVec = CreateStridedVector(I);
      if (Member->getType() != ScalarTy) {
        StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
      State.set(VPDefs[J], StridedVec);
  Value *MaskForGaps =
         "Mismatch between NeedsMaskForGaps and MaskForGaps");
  unsigned StoredIdx = 0;
  for (unsigned i = 0; i < InterleaveFactor; i++) {
           "Fail to get a member from an interleaved store group");
    Value *StoredVec = State.get(StoredValues[StoredIdx]);
      StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
    if (StoredVec->getType() != SubVT)
  if (BlockInMask || MaskForGaps) {
    Value *GroupMask = CreateGroupMask(MaskForGaps);
    NewStoreInstr = State.Builder.CreateMaskedStore(
        IVec, ResAddr, Group->getAlign(), GroupMask);
        State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  for (unsigned i = 0; i < IG->getFactor(); ++i) {
    if (!IG->getMember(i))
      O << "\n" << Indent << " store ";
      O << " to index " << i;
      O << "\n" << Indent << " ";
      O << " = load from index " << i;
  assert(!State.Lane && "Interleave group being replicated.");
  assert(State.VF.isScalable() &&
         "Only support scalable VF for EVL tail-folding.");
         "Masking gaps for scalable vectors is not yet supported.");
  unsigned InterleaveFactor = Group->getFactor();
  assert(InterleaveFactor <= 8 &&
         "Unsupported deinterleave/interleave factor for scalable vectors");
  Value *InterleaveEVL = State.Builder.CreateMul(
      EVL, ConstantInt::get(EVL->getType(), InterleaveFactor), "interleave.evl",
  Value *GroupMask = nullptr;
        State.Builder.CreateVectorSplat(WideVF, State.Builder.getTrue());
    CallInst *NewLoad = State.Builder.CreateIntrinsic(
        VecTy, Intrinsic::vp_load, {ResAddr, GroupMask, InterleaveEVL}, nullptr,
    NewLoad = State.Builder.CreateIntrinsic(
        nullptr, "strided.vec");
    const DataLayout &DL = Instr->getDataLayout();
    for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
      Value *StridedVec = State.Builder.CreateExtractValue(NewLoad, I);
      if (Member->getType() != ScalarTy) {
  const DataLayout &DL = Instr->getDataLayout();
  for (unsigned I = 0, StoredIdx = 0; I < InterleaveFactor; I++) {
    Value *StoredVec = State.get(StoredValues[StoredIdx]);
    if (StoredVec->getType() != SubVT)
  State.Builder.CreateIntrinsic(Type::getVoidTy(Ctx), Intrinsic::vp_store,
                                {IVec, ResAddr, GroupMask, InterleaveEVL});
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  for (unsigned i = 0; i < IG->getFactor(); ++i) {
    if (!IG->getMember(i))
      O << "\n" << Indent << " vp.store ";
      O << " to index " << i;
      O << "\n" << Indent << " ";
      O << " = vp.load from index " << i;
  unsigned InsertPosIdx = 0;
  for (unsigned Idx = 0; Idx < IG->getFactor(); ++Idx)
    if (auto *Member = IG->getMember(Idx)) {
      if (Member == InsertPos)
  Type *ValTy = Ctx.Types.inferScalarType(
                    ->getAddressSpace();
  unsigned InterleaveFactor = IG->getFactor();
  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (IG->getMember(IF))
      InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
      IG->getAlign(), AS, Ctx.CostKind, getMask(), NeedsMaskForGaps);
  if (!IG->isReverse())
  return Cost + IG->getNumMembers() *
                    VectorTy, VectorTy, {}, Ctx.CostKind,
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EMIT ";
  O << " = CANONICAL-INDUCTION ";
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
         "unexpected number of operands");
  O << Indent << "EMIT ";
  O << " = WIDEN-POINTER-INDUCTION ";
  O << Indent << "EMIT ";
  O << " = EXPAND SCEV " << *Expr;
  IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
          : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
    VStep = Builder.CreateVectorSplat(VF, VStep);
        Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
  Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
  State.set(this, CanonicalVectorIV);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EMIT ";
  O << " = WIDEN-CANONICAL-INDUCTION ";
  auto &Builder = State.Builder;
  Type *VecTy = State.VF.isScalar()
                    ? VectorInit->getType()
      State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
  if (State.VF.isVector()) {
    auto *One = ConstantInt::get(IdxTy, 1);
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    VectorInit = Builder.CreateInsertElement(
  Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
  Phi->addIncoming(VectorInit, VectorPH);
  State.set(this, Phi);
  return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
      State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
  bool ScalarPHI = State.VF.isScalar() || isInLoop();
  Value *StartV = State.get(StartVPV, ScalarPHI);
  assert(State.CurrentParentLoop->getHeader() == HeaderBB &&
         "recipe must be in the vector loop header");
  Phi->addIncoming(StartV, VectorPH);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-REDUCTION-PHI ";
  Instruction *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
  State.set(this, VecPhi);
  return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "WIDEN-PHI ";
      State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
      State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
  Phi->addIncoming(StartMask, VectorPH);
  State.set(this, Phi);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "ACTIVE-LANE-MASK-PHI ";
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file provides a LoopVectorizationPlanner class.
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static bool isOrdered(const Instruction *I)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
This file contains the declarations of different VPlan-related auxiliary helpers.
static Instruction * createReverseEVL(IRBuilderBase &Builder, Value *Operand, Value *EVL, const Twine &Name)
Use all-true mask for reverse rather than actual mask, as it avoids a dependence w/o affecting the re...
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static InstructionCost getCostForIntrinsics(Intrinsic::ID ID, ArrayRef< const VPValue * > Operands, const VPRecipeWithIRFlags &R, ElementCount VF, VPCostContext &Ctx)
Compute the cost for the intrinsic ID with Operands, produced by R.
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
SmallVector< Value *, 2 > VectorParts
static bool isUsedByLoadStoreAddress(const VPUser *V)
Returns true if V is used as part of the address of another load or store.
static void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPLane &Lane, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
This file contains the declarations of the Vectorization Plan base classes:
static const uint32_t IV[8]
void printAsOperand(OutputBuffer &OB, Prec P=Prec::Default, bool StrictlyWorse=false) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
static LLVM_ABI StringRef getPredicateName(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
This is an important base class in LLVM.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
LLVM_ABI void print(raw_ostream &O) const
Print fast-math flags to O.
void setAllowContract(bool B=true)
bool noSignedZeros() const
void setAllowReciprocal(bool B=true)
bool allowReciprocal() const
void setNoSignedZeros(bool B=true)
bool allowReassoc() const
Flag queries.
void setNoNaNs(bool B=true)
void setAllowReassoc(bool B=true)
Flag setters.
void setApproxFunc(bool B=true)
void setNoInfs(bool B=true)
bool allowContract() const
Class to represent function types.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool willReturn() const
Determine if the function will return.
bool doesNotThrow() const
Determine if the function cannot unwind.
Type * getReturnType() const
Returns the type of the ret val.
Common base class shared among various IRBuilders.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
LLVM_ABI Value * CreateVectorSpliceRight(Value *V1, Value *V2, Value *Offset, const Twine &Name="")
Create a vector.splice.right intrinsic call, or a shufflevector that produces the same result if the ...
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFreeze(Value *V, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
LLVM_ABI Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateCountTrailingZeroElems(Type *ResTy, Value *Mask, bool ZeroIsPoison=true, const Twine &Name="")
Create a call to llvm.experimental_cttz_elts.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
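A small usage sketch for the creation methods listed above; BB and all value arguments are hypothetical, supplied by the caller:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Emit (splat(X) == V) ? A : B at the end of BB.
Value *emitSelectOnSplat(BasicBlock *BB, Value *X, Value *V, Value *A,
                         Value *B) {
  IRBuilder<> Builder(BB);
  Value *Splat = Builder.CreateVectorSplat(/*NumElts=*/4, X, "splat");
  Value *Cmp = Builder.CreateICmpEQ(Splat, V, "cmp");
  return Builder.CreateSelect(Cmp, A, B, "sel");
}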
static InstructionCost getInvalid(CostType Val=0)
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g. alias info) from the instructions in this group to NewInst.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
Information for memory intrinsic cost model.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
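For example, merging a value from two predecessors (all blocks and values hypothetical):

#include "llvm/IR/Instructions.h"
using namespace llvm;

PHINode *mergeInt32(BasicBlock *JoinBB, Value *FromThen, BasicBlock *ThenBB,
                    Value *FromElse, BasicBlock *ElseBB) {
  // Reserve space for the two incoming edges up front; phis go at the
  // start of the join block.
  PHINode *Phi = PHINode::Create(Type::getInt32Ty(JoinBB->getContext()),
                                 /*NumReservedValues=*/2, "merge",
                                 JoinBB->begin());
  Phi->addIncoming(FromThen, ThenBB);
  Phi->addIncoming(FromElse, ElseBB);
  return Phi;
}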
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of existing predicates.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
unsigned getOpcode() const
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is loop invariant.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is a loop induction value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
This class represents an analyzed expression in the program.
This class provides computation of slot numbers for LLVM Assembly writing.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
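A sketch combining the two containers: deduplicate while preserving order. The inline capacities of 4 and 8 are arbitrary choices:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

// Keep the first occurrence of each pointer; neither container touches the
// heap until its inline capacity is exceeded.
template <typename T>
llvm::SmallVector<T *, 4> uniqueInOrder(llvm::ArrayRef<T *> Items) {
  llvm::SmallVector<T *, 4> Result;
  llvm::SmallPtrSet<T *, 8> Seen;
  for (T *Item : Items)
    if (Seen.insert(Item).second) // .second is false when already present
      Result.push_back(Item);
  return Result;
}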
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isVoidTy() const
Return true if this is 'void'.
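A short sketch of the query API above: peel a vector type to its element and report the integer bit width:

#include "llvm/IR/Type.h"
using namespace llvm;

// Returns the element bit width for integer (or vector-of-integer) types,
// and 0 otherwise.
unsigned elementBitWidth(Type *Ty) {
  Type *Scalar = Ty->getScalarType(); // identity for non-vector types
  return Scalar->isIntegerTy() ? Scalar->getScalarSizeInBits() : 0;
}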
value_op_iterator value_op_end()
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
value_op_iterator value_op_begin()
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
void insert(VPRecipeBase *Recipe, iterator InsertPt)
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
const VPBlocksTy & getPredecessors() const
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPBranchOnMaskRecipe.
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
VPlan-based builder utility analogous to IRBuilder.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
LLVM_ABI_FOR_TEST void dump() const
Dump the VPDef to stderr (for debugging).
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
ArrayRef< VPRecipeValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
unsigned getVPDefID() const
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1st operand) + IV (2nd operand) * StepValue (3rd operand).
VPIRValue * getStartValue() const
VPValue * getStepValue() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
bool isSingleScalar() const
Returns true if the result of this VPExpressionRecipe is a single-scalar.
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legacy cost model and the underlying instructions.
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed unconditionally.
Class to record and manage LLVM IR flags.
ReductionFlagsTy ReductionFlags
LLVM_ABI_FOR_TEST bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
CmpInst::Predicate CmpPredicate
void printFlags(raw_ostream &O) const
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
bool isReductionOrdered() const
CmpInst::Predicate getPredicate() const
bool hasNoSignedWrap() const
void intersectFlags(const VPIRFlags &Other)
Only keep flags also present in Other.
GEPNoWrapFlags getGEPNoWrapFlags() const
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
DisjointFlagsTy DisjointFlags
bool hasNoUnsignedWrap() const
NonNegFlagsTy NonNegFlags
bool isReductionInLoop() const
void applyFlags(Instruction &I) const
Apply the IR flags to I.
RecurKind getRecurKind() const
Instruction & getInstruction() const
void extractLastLaneOfLastPartOfFirstOperand(VPBuilder &Builder)
Update the recipe's first operand to the last lane of the last part of the operand using Builder.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
VPIRInstruction(Instruction &I)
VPIRInstruction::create() should be used to create VPIRInstructions, as subclasses may need to be created, e.g. VPIRPhi for wrapped phi nodes.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
static unsigned getNumOperandsForOpcode(unsigned Opcode)
Return the number of operands determined by the opcode of the VPInstruction.
bool doesGeneratePerAllLanes() const
Returns true if this VPInstruction generates scalar values for all lanes.
@ ExtractLastActive
Extracts the lane from the first operand corresponding to the last active (non-zero) lane in the mask supplied as the second operand.
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
@ ComputeAnyOfResult
Compute the final result of an AnyOf reduction with select(cmp(),x,y), where one of (x,y) is loop invariant.
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
@ ExtractPenultimateElement
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue vector loop.
@ Unpack
Extracts all lanes from its (non-scalable) vector operand.
@ FirstOrderRecurrenceSplice
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the reduction, and the VF scaling factor.
@ BuildVector
Creates a fixed-width vector containing all operands.
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed-width vectors, each containing the corresponding struct field of all operands.
@ VScale
Returns the value for vscale.
@ CanonicalIVIncrementForPart
@ CalculateTripCountMinusVF
bool opcodeMayReadOrWriteFromMemory() const
Returns true if the underlying opcode may read from or write to memory.
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
unsigned getOpcode() const
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g. by performing a reduction or extracting a lane.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scalar.
void execute(VPTransformState &State) override
Generate the instruction.
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
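Using the constructor shown above, a new abstract operation can be created and linked in front of an existing recipe. A sketch that assumes it runs inside the vectorizer sources (the VPlan classes are internal to LoopVectorize, not a stable API); X, Y and Anchor are hypothetical:

// Build an abstract add and place it into Anchor's VPBasicBlock.
VPInstruction *emitAdd(VPValue *X, VPValue *Y, VPRecipeBase *Anchor) {
  auto *Add = new VPInstruction(Instruction::Add, {X, Y});
  Add->insertBefore(Anchor);
  return Add;
}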
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this recipe.
Instruction * getInsertPos() const
const InterleaveGroup< Instruction > * getInterleaveGroup() const
VPValue * getMask() const
Return the mask used by this recipe.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
VPValue * getAddr() const
Return the address accessed by this recipe.
VPValue * getEVL() const
The VPValue of the explicit vector length.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term "output IR" refers to code that is generated by the vectorizer.
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
static VPLane getFirstLane()
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, also number of incoming blocks.
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the phi operands of the recipe to O.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs (from a replicate region) as needed to retain SSA form.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
virtual void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const =0
Each concrete VPRecipe prints itself, without printing common information, like debug info or metadata.
VPRegionBlock * getRegion()
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override final
Print the recipe, delegating to printRecipe().
bool isPhi() const
Returns true for PHI-like recipes.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legacy cost model and the underlying instructions.
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the ForceTargetInstructionCost flag.
bool isScalarCast() const
Return true if the recipe is a scalar cast.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos lives in, right after MovePos.
VPRecipeBase(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
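The insertion and movement members above are the basic toolkit of VPlan-to-VPlan transforms. A sketch with hypothetical recipes R, Pos and Dead:

// Unlink a recipe, re-insert it elsewhere, and delete one that became dead.
void hoistAndClean(VPRecipeBase *R, VPRecipeBase *Pos, VPRecipeBase *Dead) {
  R->removeFromParent();   // unlink but keep the recipe alive
  R->insertAfter(Pos);     // relink right after Pos
  Dead->eraseFromParent(); // unlink and delete
}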
void execute(VPTransformState &State) override
Generate the reduction in the loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getEVL() const
The VPValue of the explicit vector length.
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of VPReductionRecipe.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
VPValue * getCondOp() const
The VPValue of the condition for the block.
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
bool isPartialReduction() const
Returns true if the reduction outputs a vector with a scaled down VF.
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
bool isInLoop() const
Returns true if the reduction is in-loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR corresponding to its VPBlockBases.
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool isSingleScalar() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPReplicateRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPValue * getStepValue() const
VPValue * getStartIndex() const
Return the StartIndex, or null if known to be zero, valid only after unrolling.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by their users.
VPSingleDef is a base class for recipes modeling a sequence of one or more output IR instructions that define a single result VPValue.
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
LLVM_ABI_FOR_TEST LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
This class can be used to assign names to VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to its defs.
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
operand_iterator op_begin()
VPValue * getOperand(unsigned N) const
virtual bool usesFirstLaneOnly(const VPValue *Op) const
Returns true if the VPUser only uses the first lane of operand Op.
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into, within and out of the VPlan.
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe, i.e. if it is a live-in.
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
void replaceAllUsesWith(VPValue *New)
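A sketch of the def-use API above, mirroring IR-level replace-all-uses-with (it assumes Old's defining recipe produces no other live values):

// Reroute all users of Old to New, then drop Old's defining recipe.
void replaceAndPrune(VPValue *Old, VPValue *New) {
  Old->replaceAllUsesWith(New);
  if (VPRecipeBase *Def = Old->getDefiningRecipe())
    Def->eraseFromParent(); // a null Def means Old is a live-in; keep it
}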
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getSourceElementType() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
Function * getCalledScalarFunction() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCallRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1>, for 0 <= Part < UF}, and step = <VF*UF, VF*UF, ..., VF*UF>.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce widened copies of the cast.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
Type * getSourceElementType() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPIRValue * getStartValue() const
Returns the start value of the induction.
VPValue * getStepValue()
Returns the step value of the induction.
VPIRValue * getStartValue() const
Returns the start value of the induction.
TruncInst * getTruncInst()
Returns the first defined value as a TruncInst if it is one, or nullptr otherwise.
Type * getScalarType() const
Returns the scalar type of the induction.
bool isCanonical() const
Returns true if the induction is canonical, i.e. it starts at 0 and its step is 1.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
StringRef getIntrinsicName() const
Return the name of the intrinsic as a string.
LLVM_ABI_FOR_TEST bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Type * getResultType() const
Return the scalar return type of the intrinsic.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
bool Consecutive
Whether the accessed addresses are consecutive.
VPValue * getMask() const
Return the mask used by this recipe.
Align Alignment
Alignment information for this memory access.
VPValue * getAddr() const
Return the address accessed by this recipe.
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenPHIRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State.VF elements.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
LLVMContext & getContext() const
All values hold a context through their type.
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for normal integer types.
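A small demonstration of the polynomial-count arithmetic above; the comments show the vector shapes each count describes:

#include "llvm/Support/TypeSize.h"
using namespace llvm;

void elementCountDemo() {
  ElementCount Fixed = ElementCount::getFixed(8);   // <8 x ty>
  ElementCount Scal = ElementCount::getScalable(4); // <vscale x 4 x ty>
  ElementCount Half = Scal.divideCoefficientBy(2);  // <vscale x 2 x ty>
  bool S = Scal.isScalable();                       // true
  unsigned Min = Half.getKnownMinValue();           // 2; runtime count is 2 * vscale
  (void)Fixed; (void)S; (void)Min;
}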
const ParentTy * getParent() const
self_iterator getIterator()
typename base_list_type::iterator iterator
iterator erase(iterator where)
pointer remove(iterator &IT)
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
LLVM_ABI Intrinsic::ID getDeinterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.deinterleaveN intrinsic for factor N.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm.ssa.copy".
bool match(Val *V, const Pattern &P)
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignores it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
specific_intval< 1 > m_False()
specific_intval< 1 > m_True()
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::Reverse, Op0_t > m_Reverse(const Op0_t &Op0)
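The matchers above follow the IR-level PatternMatch idiom. A sketch, assuming it runs inside the vectorizer sources where the VPlanPatternMatch header is available:

// Recognize a select rooted at V, ignoring the actual operands.
bool isAnySelect(VPValue *V) {
  using namespace llvm::VPlanPatternMatch;
  return match(V, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
}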
NodeAddr< DefNode * > Def
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or only its first lane is used.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI Value * createSimpleReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a reduction of the given vector.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
@ Undef
Value of the register doesn't matter.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index and B, C, ... are the corresponding values from the input ranges.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
auto cast_or_null(const Y &Val)
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
bool isa_and_nonnull(const Y &Val)
LLVM_ABI Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
auto dyn_cast_or_null(const Y &Val)
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
LLVM_ABI llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the range's value type.
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
LLVM_ABI bool isVectorIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct element index RetIdx.
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elements are valid vector element types.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
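For instance, de-interleaving the even lanes of a factor-2 group combines createStrideMask with a shuffle. A sketch; Wide is a hypothetical 8-element vector {a0,b0,a1,b1,a2,b2,a3,b3}:

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *extractEvenLanes(IRBuilderBase &Builder, Value *Wide) {
  SmallVector<int, 16> Mask =
      createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4); // {0, 2, 4, 6}
  return Builder.CreateShuffleVector(Wide, Mask, "even");
}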
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ FMinimumNum
FP min with llvm.minimumnum semantics.
@ FMinimum
FP min with llvm.minimum semantics.
@ FMaxNum
FP max with llvm.maxnum semantics including NaNs.
@ Mul
Product of integers.
@ AnyOf
AnyOf reduction with select(cmp(),x,y) where one of (x,y) is loop invariant, and both x and y are of integer type.
@ FMaximum
FP max with llvm.maximum semantics.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ FMinNum
FP min with llvm.minnum semantics including NaNs.
@ Sub
Subtraction of integers.
@ FMaximumNum
FP max with llvm.maximumnum semantics.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector.reduce.* intrinsic used to generate it.
DWARFExpression::Operation Op
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
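These two helpers are local to the vectorizer sources; a sketch of how an induction increment might be materialized with them, using the signatures listed above (VF and UF hypothetical):

// For VF = <vscale x 4> and UF = 2 this materializes vscale * 8.
Value *loopIncrement(IRBuilderBase &Builder, ElementCount VF, unsigned UF) {
  return createStepForVF(Builder, Builder.getInt64Ty(), VF, /*Step=*/UF);
}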
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI Value * createOrderedReduction(IRBuilderBase &B, RecurKind RdxKind, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence kind RdxKind.
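A sketch dispatching between the two reduction builders above, choosing the ordered form when strict FP evaluation order must be preserved (assumes the declarations live in llvm/Transforms/Utils/LoopUtils.h):

#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;

Value *finishReduction(IRBuilderBase &B, Value *Src, Value *Start,
                       RecurKind Kind, bool Ordered) {
  return Ordered ? createOrderedReduction(B, Kind, Src, Start)
                 : createSimpleReduction(B, Src, Kind);
}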
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Struct to hold various analysis needed for cost computations.
TargetTransformInfo::OperandValueInfo getOperandInfo(VPValue *V) const
Returns the OperandInfo for V, if it is a live-in.
TargetTransformInfo::TargetCostKind CostKind
const TargetTransformInfo & TTI
void execute(VPTransformState &State) override
Generate the phi nodes.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
An overlay for VPIRInstructions wrapping PHI nodes, enabling convenient use of cast/dyn_cast/isa and execute() implementations.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
void execute(VPTransformState &State) override
Generate the instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
InstructionCost getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
VPRecipeWithIRFlags(const unsigned char SC, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL=DebugLoc::getUnknown())
A symbolic live-in VPValue, used for values like vector trip count, VF, and VFxUF.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide load or gather.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
VPValue * getStoredValue() const
Return the value stored by this recipe.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide store or scatter.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
void execute(VPTransformState &State) override
Generate a wide store or scatter.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStoredValue() const
Return the value stored by this recipe.