//===- VPlanPatternMatch.h - Match on VPValues and recipes ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a simple and efficient mechanism for performing general
// tree-based pattern matches on the VPlan values and recipes, based on
// LLVM's IR pattern matchers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
#define LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H

#include "VPlan.h"
namespace llvm::VPlanPatternMatch {

template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  return P.match(V);
}

template <typename Pattern> bool match(VPUser *U, const Pattern &P) {
  auto *R = dyn_cast<VPRecipeBase>(U);
  return R && match(R, P);
}

template <typename Pattern> bool match(VPSingleDefRecipe *R, const Pattern &P) {
  return P.match(static_cast<const VPRecipeBase *>(R));
}

template <typename Val, typename Pattern> struct VPMatchFunctor {
  const Pattern &P;
  VPMatchFunctor(const Pattern &P) : P(P) {}
  bool operator()(Val *V) const { return match(V, P); }
};

/// A match functor that can be used as a UnaryPredicate in functional
/// algorithms like all_of.
template <typename Val = VPUser, typename Pattern>
VPMatchFunctor<Val, Pattern> match_fn(const Pattern &P) {
  return P;
}

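// Illustrative sketch of how match_fn composes with functional algorithms.
// `Def` below stands for some VPSingleDefRecipe under inspection; it is not
// part of this header.
//
//   bool OnlyBroadcastUsers =
//       all_of(Def->users(), match_fn(m_Broadcast(m_Specific(Def))));
//
// Each user is run through match(), so the predicate succeeds only if every
// user is a Broadcast of Def itself.
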
template <typename Class> struct class_match {
  template <typename ITy> bool match(ITy *V) const { return isa<Class>(V); }
};

/// Match an arbitrary VPValue and ignore it.
inline class_match<VPValue> m_VPValue() { return class_match<VPValue>(); }

template <typename Class> struct bind_ty {
  Class *&VR;

  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) const {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};

/// Match a specified VPValue.
struct specificval_ty {
  const VPValue *Val;

  specificval_ty(const VPValue *V) : Val(V) {}

  bool match(VPValue *VPV) const { return VPV == Val; }
};

inline specificval_ty m_Specific(const VPValue *VPV) { return VPV; }

/// Stores a reference to the VPValue *, not the VPValue * itself,
/// thus can be used in commutative matchers.
struct deferredval_ty {
  VPValue *const &Val;

  deferredval_ty(VPValue *const &V) : Val(V) {}

  bool match(VPValue *const V) const { return V == Val; }
};

/// Like m_Specific(), but works if the specific value to match is determined
/// as part of the same match() expression. For example:
/// m_Mul(m_VPValue(X), m_Specific(X)) is incorrect, because m_Specific() will
/// bind X before the pattern match starts.
/// m_Mul(m_VPValue(X), m_Deferred(X)) is correct, and will check against
/// whichever value m_VPValue(X) populated.
inline deferredval_ty m_Deferred(VPValue *const &V) { return V; }

/// Match an integer constant or vector of constants if Pred::isValue returns
/// true for the APInt. \p BitWidth optionally specifies the bitwidth the
/// matched constant must have. If it is 0, the matched constant can have any
/// bitwidth.
template <typename Pred, unsigned BitWidth = 0> struct int_pred_ty {
  Pred P;

  int_pred_ty(Pred P) : P(std::move(P)) {}
  int_pred_ty() : P() {}

  bool match(VPValue *VPV) const {
    if (!VPV->isLiveIn())
      return false;
    Value *V = VPV->getLiveInIRValue();
    if (!V)
      return false;
    assert(!V->getType()->isVectorTy() && "Unexpected vector live-in");
    const auto *CI = dyn_cast<ConstantInt>(V);
    if (!CI)
      return false;

    if (BitWidth != 0 && CI->getBitWidth() != BitWidth)
      return false;
    return P.isValue(CI->getValue());
  }
};

/// Match a specified integer value or vector of all elements of that
/// value. \p BitWidth optionally specifies the bitwidth the matched constant
/// must have. If it is 0, the matched constant can have any bitwidth.
struct is_specific_int {
  APInt Val;

  is_specific_int(APInt V) : Val(std::move(V)) {}

  bool isValue(const APInt &C) const { return APInt::isSameValue(Val, C); }
};

template <unsigned Bitwidth = 0>
using specific_intval = int_pred_ty<is_specific_int, Bitwidth>;

inline specific_intval<0> m_SpecificInt(uint64_t V) {
  return specific_intval<0>(APInt(64, V));
}

inline specific_intval<1> m_False() {
  return specific_intval<1>(APInt(64, 0));
}

inline specific_intval<1> m_True() {
  return specific_intval<1>(APInt(64, 1));
}

struct is_all_ones {
  bool isValue(const APInt &C) const { return C.isAllOnes(); }
};

/// Match an integer or vector with all bits set.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_all_ones> m_AllOnes() {
  return int_pred_ty<is_all_ones>();
}

struct is_zero_int {
  bool isValue(const APInt &C) const { return C.isZero(); }
};

struct is_one {
  bool isValue(const APInt &C) const { return C.isOne(); }
};

/// Match an integer 0 or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_zero_int> m_ZeroInt() {
  return int_pred_ty<is_zero_int>();
}

/// Match an integer 1 or a vector with all elements equal to 1.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_one> m_One() { return int_pred_ty<is_one>(); }

struct bind_apint {
  const APInt *&Res;

  bind_apint(const APInt *&Res) : Res(Res) {}

  bool match(VPValue *VPV) const {
    if (!VPV->isLiveIn())
      return false;
    Value *V = VPV->getLiveInIRValue();
    if (!V)
      return false;
    assert(!V->getType()->isVectorTy() && "Unexpected vector live-in");
    const auto *CI = dyn_cast<ConstantInt>(V);
    if (!CI)
      return false;
    Res = &CI->getValue();
    return true;
  }
};

inline bind_apint m_APInt(const APInt *&C) { return C; }

struct bind_const_int {
  uint64_t &Res;

  bind_const_int(uint64_t &Res) : Res(Res) {}

  bool match(VPValue *VPV) const {
    const APInt *APConst;
    if (!bind_apint(APConst).match(VPV))
      return false;
    if (auto C = APConst->tryZExtValue()) {
      Res = *C;
      return true;
    }
    return false;
  }
};

/// Match a plain integer constant no wider than 64-bits, capturing it if we
/// match.
inline bind_const_int m_ConstantInt(uint64_t &Res) { return Res; }

/// Matching combinators
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) || R.match(V);
  }
};

template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) && R.match(V);
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}

/// Match a VPValue, capturing it if we match.
inline bind_ty<VPValue> m_VPValue(VPValue *&V) { return V; }

/// Match a VPInstruction, capturing it if we match.
inline bind_ty<VPInstruction> m_VPInstruction(VPInstruction *&V) { return V; }

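// Illustrative sketch of the capturing matchers; `V` below stands for some
// VPValue being inspected and is not part of this header.
//
//   VPValue *X, *Y;
//   if (match(V, m_Mul(m_VPValue(X), m_VPValue(Y)))) {
//     // X and Y are now bound to the multiply's operands.
//   }
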
template <typename Ops_t, unsigned Opcode, bool Commutative,
          typename... RecipeTys>
struct Recipe_match {
  Ops_t Ops;

  template <typename... OpTy> Recipe_match(OpTy... Ops) : Ops(Ops...) {
    static_assert(std::tuple_size<Ops_t>::value == sizeof...(Ops) &&
                  "number of operands in constructor doesn't match Ops_t");
    static_assert((!Commutative || std::tuple_size<Ops_t>::value == 2) &&
                  "only binary ops can be commutative");
  }

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPSingleDefRecipe *R) const {
    return match(static_cast<const VPRecipeBase *>(R));
  }

  bool match(const VPRecipeBase *R) const {
    if (std::tuple_size_v<Ops_t> == 0) {
      auto *VPI = dyn_cast<VPInstruction>(R);
      return VPI && VPI->getOpcode() == Opcode;
    }

    if ((!matchRecipeAndOpcode<RecipeTys>(R) && ...))
      return false;

    if (R->getNumOperands() != std::tuple_size<Ops_t>::value) {
      assert(Opcode == Instruction::PHI &&
             "non-variadic recipe with matched opcode does not have the "
             "expected number of operands");
      return false;
    }

    auto IdxSeq = std::make_index_sequence<std::tuple_size<Ops_t>::value>();
    if (all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
          return Op.match(R->getOperand(Idx));
        }))
      return true;

    return Commutative &&
           all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
             return Op.match(R->getOperand(R->getNumOperands() - Idx - 1));
           });
  }

private:
  template <typename RecipeTy>
  static bool matchRecipeAndOpcode(const VPRecipeBase *R) {
    auto *DefR = dyn_cast<RecipeTy>(R);
    // Check for recipes that do not have opcodes.
    if constexpr (std::is_same_v<RecipeTy, VPScalarIVStepsRecipe> ||
                  std::is_same_v<RecipeTy, VPCanonicalIVPHIRecipe> ||
                  std::is_same_v<RecipeTy, VPDerivedIVRecipe> ||
                  std::is_same_v<RecipeTy, VPVectorEndPointerRecipe>)
      return DefR;
    else
      return DefR && DefR->getOpcode() == Opcode;
  }

  /// Helper to check if predicate \p P holds on all tuple elements in Ops using
  /// the provided index sequence.
  template <typename Fn, std::size_t... Is>
  bool all_of_tuple_elements(std::index_sequence<Is...>, Fn P) const {
    return (P(std::get<Is>(Ops), Is) && ...);
  }
};

template <unsigned Opcode, typename... OpTys>
using AllRecipe_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ false,
                 VPWidenRecipe, VPReplicateRecipe, VPWidenCastRecipe,
                 VPInstruction, VPWidenSelectRecipe>;

template <unsigned Opcode, typename... OpTys>
using AllRecipe_commutative_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ true,
                 VPWidenRecipe, VPReplicateRecipe, VPInstruction>;

template <unsigned Opcode, typename... OpTys>
using VPInstruction_match = Recipe_match<std::tuple<OpTys...>, Opcode,
                                         /*Commutative*/ false, VPInstruction>;

template <unsigned Opcode, typename... OpTys>
inline VPInstruction_match<Opcode, OpTys...>
m_VPInstruction(const OpTys &...Ops) {
  return VPInstruction_match<Opcode, OpTys...>(Ops...);
}
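
// Illustrative sketch: match a VPInstruction with a specific opcode and bind
// its single operand. `R` below stands for a recipe being visited and is not
// part of this header.
//
//   VPValue *Mask;
//   if (match(&R, m_VPInstruction<VPInstruction::Not>(m_VPValue(Mask))))
//     ...;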

/// BuildVector matches only its opcode, w/o matching its operands, as the
/// number of operands is not fixed.
inline VPInstruction_match<VPInstruction::BuildVector> m_BuildVector() {
  return VPInstruction_match<VPInstruction::BuildVector>();
}

template <typename Op0_t>
inline VPInstruction_match<Instruction::Freeze, Op0_t>
m_Freeze(const Op0_t &Op0) {
  return VPInstruction_match<Instruction::Freeze, Op0_t>(Op0);
}

inline VPInstruction_match<VPInstruction::BranchOnCond> m_BranchOnCond() {
  return VPInstruction_match<VPInstruction::BranchOnCond>();
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::BranchOnCond, Op0_t>
m_BranchOnCond(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::BranchOnCond, Op0_t>(Op0);
}

inline VPInstruction_match<VPInstruction::BranchOnTwoConds>
m_BranchOnTwoConds() {
  return VPInstruction_match<VPInstruction::BranchOnTwoConds>();
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnTwoConds, Op0_t, Op1_t>
m_BranchOnTwoConds(const Op0_t &Op0, const Op1_t &Op1) {
  return VPInstruction_match<VPInstruction::BranchOnTwoConds, Op0_t, Op1_t>(
      Op0, Op1);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Broadcast, Op0_t>
m_Broadcast(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::Broadcast, Op0_t>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExplicitVectorLength, Op0_t>
m_EVL(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::ExplicitVectorLength, Op0_t>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastLane, Op0_t>
m_ExtractLastLane(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::ExtractLastLane, Op0_t>(Op0);
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<Instruction::ExtractElement, Op0_t, Op1_t>
m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) {
  return VPInstruction_match<Instruction::ExtractElement, Op0_t, Op1_t>(Op0,
                                                                        Op1);
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::ExtractLane, Op0_t, Op1_t>
m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) {
  return VPInstruction_match<VPInstruction::ExtractLane, Op0_t, Op1_t>(Op0,
                                                                       Op1);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>
m_ExtractLastPart(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<
    VPInstruction::ExtractLastLane,
    VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>>
m_ExtractLastLaneOfLastPart(const Op0_t &Op0) {
  return m_ExtractLastLane(m_ExtractLastPart(Op0));
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractPenultimateElement, Op0_t>
m_ExtractPenultimateElement(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::ExtractPenultimateElement, Op0_t>(
      Op0);
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t>
m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPInstruction_match<VPInstruction::ActiveLaneMask, Op0_t, Op1_t,
                             Op2_t>(Op0, Op1, Op2);
}

inline VPInstruction_match<VPInstruction::BranchOnCount> m_BranchOnCount() {
  return VPInstruction_match<VPInstruction::BranchOnCount>();
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnCount, Op0_t, Op1_t>
m_BranchOnCount(const Op0_t &Op0, const Op1_t &Op1) {
  return VPInstruction_match<VPInstruction::BranchOnCount, Op0_t, Op1_t>(Op0,
                                                                         Op1);
}

inline VPInstruction_match<VPInstruction::AnyOf> m_AnyOf() {
  return VPInstruction_match<VPInstruction::AnyOf>();
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::AnyOf, Op0_t>
m_AnyOf(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::AnyOf, Op0_t>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::FirstActiveLane, Op0_t>
m_FirstActiveLane(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::FirstActiveLane, Op0_t>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::LastActiveLane, Op0_t>
m_LastActiveLane(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::LastActiveLane, Op0_t>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Reverse, Op0_t>
m_Reverse(const Op0_t &Op0) {
  return VPInstruction_match<VPInstruction::Reverse, Op0_t>(Op0);
}

inline VPInstruction_match<VPInstruction::StepVector> m_StepVector() {
  return VPInstruction_match<VPInstruction::StepVector>();
}

template <unsigned Opcode, typename Op0_t>
inline AllRecipe_match<Opcode, Op0_t> m_Unary(const Op0_t &Op0) {
  return AllRecipe_match<Opcode, Op0_t>(Op0);
}

template <typename Op0_t>
inline AllRecipe_match<Instruction::Trunc, Op0_t> m_Trunc(const Op0_t &Op0) {
  return m_Unary<Instruction::Trunc, Op0_t>(Op0);
}

template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::Trunc, Op0_t>, Op0_t>
m_TruncOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_Trunc(Op0), Op0);
}

template <typename Op0_t>
inline AllRecipe_match<Instruction::ZExt, Op0_t> m_ZExt(const Op0_t &Op0) {
  return m_Unary<Instruction::ZExt, Op0_t>(Op0);
}

template <typename Op0_t>
inline AllRecipe_match<Instruction::SExt, Op0_t> m_SExt(const Op0_t &Op0) {
  return m_Unary<Instruction::SExt, Op0_t>(Op0);
}

template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
                        AllRecipe_match<Instruction::SExt, Op0_t>>
m_ZExtOrSExt(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}

template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>, Op0_t>
m_ZExtOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), Op0);
}

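// Illustrative sketch: strip an optional zero- or sign-extend and bind the
// narrower source value. `V` below is hypothetical.
//
//   VPValue *Src;
//   bool IsExtended = match(V, m_ZExtOrSExt(m_VPValue(Src)));
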
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_match<Opcode, Op0_t, Op1_t> m_Binary(const Op0_t &Op0,
                                                      const Op1_t &Op1) {
  return AllRecipe_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>
m_c_Binary(const Op0_t &Op0, const Op1_t &Op1) {
  return AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Add, Op0_t, Op1_t> m_Add(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Add, Op0_t, Op1_t>
m_c_Add(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Sub, Op0_t, Op1_t> m_Sub(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Sub, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Mul, Op0_t, Op1_t> m_Mul(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Mul, Op0_t, Op1_t>
m_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary AND operation.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>
m_c_BinaryAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::And, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary OR operation. Note that while conceptually the operands can
/// be matched commutatively, \p Commutative defaults to false in line with the
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
/// version of the matcher.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Or, Op0_t, Op1_t>
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Or, Op0_t, Op1_t>
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
}

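// Illustrative sketch: the commutative variants try both operand orders, so
// both `X + 1` and `1 + X` are recognized here. `V` below is hypothetical.
//
//   VPValue *X;
//   bool IsIncrement = match(V, m_c_Add(m_VPValue(X), m_SpecificInt(1)));
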
/// Cmp_match is a variant of BinaryRecipe_match that also binds the comparison
/// predicate. Opcodes must either be Instruction::ICmp or Instruction::FCmp, or
/// both.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct Cmp_match {
  static_assert((sizeof...(Opcodes) == 1 || sizeof...(Opcodes) == 2) &&
                "Expected one or two opcodes");
  static_assert(
      ((Opcodes == Instruction::ICmp || Opcodes == Instruction::FCmp) && ...) &&
      "Expected a compare instruction opcode");

  CmpPredicate *Predicate = nullptr;
  Op0_t Op0;
  Op1_t Op1;

  Cmp_match(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1)
      : Predicate(&Pred), Op0(Op0), Op1(Op1) {}
  Cmp_match(const Op0_t &Op0, const Op1_t &Op1) : Op0(Op0), Op1(Op1) {}

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPRecipeBase *V) const {
    if ((m_Binary<Opcodes>(Op0, Op1).match(V) || ...)) {
      if (Predicate)
        *Predicate = cast<VPRecipeWithIRFlags>(V)->getPredicate();
      return true;
    }
    return false;
  }
};

/// SpecificCmp_match is a variant of Cmp_match that matches the comparison
/// predicate, instead of binding it.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct SpecificCmp_match {
  const CmpPredicate Predicate;
  Op0_t Op0;
  Op1_t Op1;

  SpecificCmp_match(CmpPredicate Pred, const Op0_t &LHS, const Op1_t &RHS)
      : Predicate(Pred), Op0(LHS), Op1(RHS) {}

  bool match(const VPValue *V) const {
    CmpPredicate CurrentPred;
    return Cmp_match<Op0_t, Op1_t, Opcodes...>(CurrentPred, Op0, Op1)
               .match(V) &&
           CmpPredicate::getMatching(CurrentPred, Predicate);
  }
};

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp> m_ICmp(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_ICmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Pred, Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_SpecificICmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>(MatchPred, Op0,
                                                            Op1);
}

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(Op0,
                                                                       Op1);
}

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      Pred, Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_SpecificCmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      MatchPred, Op0, Op1);
}

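// Illustrative sketch: bind the compared operands and the predicate of an
// integer compare. `V` below is hypothetical.
//
//   VPValue *A, *B;
//   CmpPredicate Pred;
//   if (match(V, m_ICmp(Pred, m_VPValue(A), m_VPValue(B))))
//     ...;
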
template <typename Op0_t, typename Op1_t>
using GEPLikeRecipe_match = match_combine_or<
    Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                 /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>,
    match_combine_or<
        VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>,
        VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>>>;

template <typename Op0_t, typename Op1_t>
inline GEPLikeRecipe_match<Op0_t, Op1_t> m_GetElementPtr(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return m_CombineOr(
      Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                   /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>(
          Op0, Op1),
      m_CombineOr(
          VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>(Op0, Op1),
          VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>(Op0,
                                                                       Op1)));
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>
m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>(
      {Op0, Op1, Op2});
}

template <typename Op0_t>
inline match_combine_or<VPInstruction_match<VPInstruction::Not, Op0_t>,
                        AllRecipe_commutative_match<
                            Instruction::Xor, int_pred_ty<is_all_ones>, Op0_t>>
m_Not(const Op0_t &Op0) {
  return m_CombineOr(m_VPInstruction<VPInstruction::Not>(Op0),
                     m_c_Binary<Instruction::Xor>(m_AllOnes(), Op0));
}

template <typename Op0_t, typename Op1_t>
inline match_combine_or<
    AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>,
    AllRecipe_match<Instruction::Select, Op0_t, Op1_t, specific_intval<1>>>
m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_c_BinaryAnd(Op0, Op1),
      m_Select(Op0, Op1, m_False()));
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Select, Op0_t, specific_intval<1>, Op1_t>
m_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Select(Op0, m_True(), Op1);
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPScalarIVSteps_match = Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0,
                                           false, VPScalarIVStepsRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>
m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPDerivedIV_match =
    Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false, VPDerivedIVRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPDerivedIV_match<Op0_t, Op1_t, Op2_t>
m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPDerivedIV_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

template <typename Addr_t, typename Mask_t> struct Load_match {
  Addr_t Addr;
  Mask_t Mask;

  Load_match(Addr_t Addr, Mask_t Mask) : Addr(Addr), Mask(Mask) {}

  template <typename OpTy> bool match(const OpTy *V) const {
    auto *Load = dyn_cast<VPWidenLoadRecipe>(V);
    if (!Load || !Addr.match(Load->getAddr()) || !Load->isMasked() ||
        !Mask.match(Load->getMask()))
      return false;
    return true;
  }
};

/// Match a (possibly reversed) masked load.
template <typename Addr_t, typename Mask_t>
inline Load_match<Addr_t, Mask_t> m_MaskedLoad(const Addr_t &Addr,
                                               const Mask_t &Mask) {
  return Load_match<Addr_t, Mask_t>(Addr, Mask);
}

template <typename Addr_t, typename Val_t, typename Mask_t> struct Store_match {
  Addr_t Addr;
  Val_t Val;
  Mask_t Mask;

  Store_match(Addr_t Addr, Val_t Val, Mask_t Mask)
      : Addr(Addr), Val(Val), Mask(Mask) {}

  template <typename OpTy> bool match(const OpTy *V) const {
    auto *Store = dyn_cast<VPWidenStoreRecipe>(V);
    if (!Store || !Addr.match(Store->getAddr()) ||
        !Val.match(Store->getStoredValue()) || !Store->isMasked() ||
        !Mask.match(Store->getMask()))
      return false;
    return true;
  }
};

/// Match a (possibly reversed) masked store.
template <typename Addr_t, typename Val_t, typename Mask_t>
inline Store_match<Addr_t, Val_t, Mask_t>
m_MaskedStore(const Addr_t &Addr, const Val_t &Val, const Mask_t &Mask) {
  return Store_match<Addr_t, Val_t, Mask_t>(Addr, Val, Mask);
}

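// Illustrative sketch: bind the address and mask of a masked widened load.
// `R` below stands for a recipe being visited and is not part of this header.
//
//   VPValue *Addr, *Mask;
//   if (match(&R, m_MaskedLoad(m_VPValue(Addr), m_VPValue(Mask))))
//     ...;
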
template <typename Op0_t, typename Op1_t>
using VectorEndPointerRecipe_match =
    Recipe_match<std::tuple<Op0_t, Op1_t>, 0,
                 /*Commutative*/ false, VPVectorEndPointerRecipe>;

template <typename Op0_t, typename Op1_t>
inline VectorEndPointerRecipe_match<Op0_t, Op1_t>
m_VecEndPtr(const Op0_t &Op0, const Op1_t &Op1) {
  return VectorEndPointerRecipe_match<Op0_t, Op1_t>(Op0, Op1);
}

/// Match a call argument at a given argument index.
template <typename Opnd_t> struct Argument_match {
  /// Call argument index to match.
  unsigned OpI;
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call)
        return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        return Val.match(R->getOperand(OpI));
    return false;
  }
};

/// Match a call argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}

/// Intrinsic matchers.
struct IntrinsicID_match {
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return R->getVectorIntrinsicID() == ID;
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return R->getCalledScalarFunction()->getIntrinsicID() == ID;

    auto MatchCalleeIntrinsic = [&](VPValue *CalleeOp) {
      if (!CalleeOp->isLiveIn())
        return false;
      auto *F = cast<Function>(CalleeOp->getLiveInIRValue());
      return F->getIntrinsicID() == ID;
    };
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call) {
        // The mask is always the last operand if predicated.
        return MatchCalleeIntrinsic(
            R->getOperand(R->getNumOperands() - 1 - R->isPredicated()));
      }
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        return MatchCalleeIntrinsic(R->getOperand(R->getNumOperands() - 1));
    return false;
  }
};

/// Intrinsic matches are combinations of ID matchers and argument
/// matchers. Higher arity matchers are defined recursively in terms of and-ing
/// them with lower arity matchers. Here are some convenient typedefs for up to
/// several arguments; more can be added as needed.
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void>
struct m_Intrinsic_Ty;
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty<T0, T1, T2, T3> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};

/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_VPValue(X), ...)
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

/// Match intrinsic calls with a runtime intrinsic ID.
inline IntrinsicID_match m_Intrinsic(Intrinsic::ID IntrID) {
  return IntrinsicID_match(IntrID);
}

template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}

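// Illustrative sketch: match a widened llvm.fabs call and bind its argument.
// `R` below stands for a recipe being visited and is not part of this header.
//
//   VPValue *X;
//   if (match(&R, m_Intrinsic<Intrinsic::fabs>(m_VPValue(X))))
//     ...;
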
struct live_in_ty {
  template <typename ITy> bool match(ITy *V) const {
    VPValue *Val = dyn_cast<VPValue>(V);
    return Val && Val->isLiveIn();
  }
};

template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    return V->hasOneUse() && SubPattern.match(V);
  }
};

template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
  return SubPattern;
}

} // namespace llvm::VPlanPatternMatch

#endif