Bug Summary

File: build/source/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Warning: line 12848, column 9
Value stored to 'VectorizedTree' is never read
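
This is a deadcode.DeadStores finding: the value assigned to 'VectorizedTree'
at the flagged line is overwritten or left unused before it is ever read. A
minimal sketch of the pattern this checker reports (hypothetical code, not the
flagged SLPVectorizer.cpp source):

    int example(int A, int B) {
      int Result = A + B; // warning: value stored to 'Result' is never read
      Result = A * B;     // only this later store is observed
      return Result;
    }

The usual fix is to drop the dead assignment or to use the stored value before
it is clobbered.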

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SLPVectorizer.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16 -I lib/Transforms/Vectorize -I /build/source/llvm/lib/Transforms/Vectorize -I include -I /build/source/llvm/include -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -source-date-epoch 1674602410 -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-01-25-024556-16494-1 -x c++ /build/source/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
1//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10// stores that can be put together into vector-stores. Next, it attempts to
11// construct a vectorizable tree using the use-def chains. If a profitable tree
12// was found, the SLP vectorizer performs vectorization on the tree.
13//
14// The pass is inspired by the work described in the paper:
15// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16//
17//===----------------------------------------------------------------------===//
18
19#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/PostOrderIterator.h"
23#include "llvm/ADT/PriorityQueue.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/SetOperations.h"
26#include "llvm/ADT/SetVector.h"
27#include "llvm/ADT/SmallBitVector.h"
28#include "llvm/ADT/SmallPtrSet.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/SmallString.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/iterator.h"
33#include "llvm/ADT/iterator_range.h"
34#include "llvm/Analysis/AliasAnalysis.h"
35#include "llvm/Analysis/AssumptionCache.h"
36#include "llvm/Analysis/CodeMetrics.h"
37#include "llvm/Analysis/DemandedBits.h"
38#include "llvm/Analysis/GlobalsModRef.h"
39#include "llvm/Analysis/IVDescriptors.h"
40#include "llvm/Analysis/LoopAccessAnalysis.h"
41#include "llvm/Analysis/LoopInfo.h"
42#include "llvm/Analysis/MemoryLocation.h"
43#include "llvm/Analysis/OptimizationRemarkEmitter.h"
44#include "llvm/Analysis/ScalarEvolution.h"
45#include "llvm/Analysis/ScalarEvolutionExpressions.h"
46#include "llvm/Analysis/TargetLibraryInfo.h"
47#include "llvm/Analysis/TargetTransformInfo.h"
48#include "llvm/Analysis/ValueTracking.h"
49#include "llvm/Analysis/VectorUtils.h"
50#include "llvm/IR/Attributes.h"
51#include "llvm/IR/BasicBlock.h"
52#include "llvm/IR/Constant.h"
53#include "llvm/IR/Constants.h"
54#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/DerivedTypes.h"
56#include "llvm/IR/Dominators.h"
57#include "llvm/IR/Function.h"
58#include "llvm/IR/IRBuilder.h"
59#include "llvm/IR/InstrTypes.h"
60#include "llvm/IR/Instruction.h"
61#include "llvm/IR/Instructions.h"
62#include "llvm/IR/IntrinsicInst.h"
63#include "llvm/IR/Intrinsics.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/Operator.h"
66#include "llvm/IR/PatternMatch.h"
67#include "llvm/IR/Type.h"
68#include "llvm/IR/Use.h"
69#include "llvm/IR/User.h"
70#include "llvm/IR/Value.h"
71#include "llvm/IR/ValueHandle.h"
72#ifdef EXPENSIVE_CHECKS
73#include "llvm/IR/Verifier.h"
74#endif
75#include "llvm/Pass.h"
76#include "llvm/Support/Casting.h"
77#include "llvm/Support/CommandLine.h"
78#include "llvm/Support/Compiler.h"
79#include "llvm/Support/DOTGraphTraits.h"
80#include "llvm/Support/Debug.h"
81#include "llvm/Support/ErrorHandling.h"
82#include "llvm/Support/GraphWriter.h"
83#include "llvm/Support/InstructionCost.h"
84#include "llvm/Support/KnownBits.h"
85#include "llvm/Support/MathExtras.h"
86#include "llvm/Support/raw_ostream.h"
87#include "llvm/Transforms/Utils/InjectTLIMappings.h"
88#include "llvm/Transforms/Utils/Local.h"
89#include "llvm/Transforms/Utils/LoopUtils.h"
90#include "llvm/Transforms/Vectorize.h"
91#include <algorithm>
92#include <cassert>
93#include <cstdint>
94#include <iterator>
95#include <memory>
96#include <optional>
97#include <set>
98#include <string>
99#include <tuple>
100#include <utility>
101#include <vector>
102
103using namespace llvm;
104using namespace llvm::PatternMatch;
105using namespace slpvectorizer;
106
107#define SV_NAME "slp-vectorizer"
108#define DEBUG_TYPE "SLP"
109
110STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
111
112cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
113 cl::desc("Run the SLP vectorization passes"));
114
115static cl::opt<int>
116 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
117 cl::desc("Only vectorize if you gain more than this "
118 "number "));
119
120static cl::opt<bool>
121ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
122 cl::desc("Attempt to vectorize horizontal reductions"));
123
124static cl::opt<bool> ShouldStartVectorizeHorAtStore(
125 "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
126 cl::desc(
127 "Attempt to vectorize horizontal reductions feeding into a store"));
128
129static cl::opt<int>
130MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
131 cl::desc("Attempt to vectorize for this register size in bits"));
132
133static cl::opt<unsigned>
134MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
135 cl::desc("Maximum SLP vectorization factor (0=unlimited)"));
136
137static cl::opt<int>
138MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
139 cl::desc("Maximum depth of the lookup for consecutive stores."));
140
141/// Limits the size of scheduling regions in a block.
142/// It avoids long compile times for _very_ large blocks where vector
143/// instructions are spread over a wide range.
144/// This limit is way higher than needed by real-world functions.
145static cl::opt<int>
146ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
147 cl::desc("Limit the size of the SLP scheduling region per block"));
148
149static cl::opt<int> MinVectorRegSizeOption(
150 "slp-min-reg-size", cl::init(128), cl::Hidden,
151 cl::desc("Attempt to vectorize for this register size in bits"));
152
153static cl::opt<unsigned> RecursionMaxDepth(
154 "slp-recursion-max-depth", cl::init(12), cl::Hidden,
155 cl::desc("Limit the recursion depth when building a vectorizable tree"));
156
157static cl::opt<unsigned> MinTreeSize(
158 "slp-min-tree-size", cl::init(3), cl::Hidden,
159 cl::desc("Only vectorize small trees if they are fully vectorizable"));
160
161// The maximum depth that the look-ahead score heuristic will explore.
162// The higher this value, the higher the compilation time overhead.
163static cl::opt<int> LookAheadMaxDepth(
164 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
165 cl::desc("The maximum look-ahead depth for operand reordering scores"));
166
167// The maximum depth that the look-ahead score heuristic will explore
168// when it is probing among candidates for vectorization tree roots.
169// The higher this value, the higher the compilation-time overhead, but unlike
170// the similar limit for operand ordering this one is used less frequently, so
171// the impact of a higher value is less noticeable.
172static cl::opt<int> RootLookAheadMaxDepth(
173 "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
174 cl::desc("The maximum look-ahead depth for searching best rooting option"));
175
176static cl::opt<bool>
177 ViewSLPTree("view-slp-tree", cl::Hidden,
178 cl::desc("Display the SLP trees with Graphviz"));
179
180// Limit the number of alias checks. The limit is chosen so that
181// it has no negative effect on the llvm benchmarks.
182static const unsigned AliasedCheckLimit = 10;
183
184// Another limit for the alias checks: The maximum distance between load/store
185// instructions where alias checks are done.
186// This limit is useful for very large basic blocks.
187static const unsigned MaxMemDepDistance = 160;
188
189/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
190/// regions to be handled.
191static const int MinScheduleRegionSize = 16;
192
193/// Predicate for the element types that the SLP vectorizer supports.
194///
195/// The most important thing to filter here are types which are invalid in LLVM
196/// vectors. We also filter target specific types which have absolutely no
197/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
198/// avoids spending time checking the cost model and realizing that they will
199/// be inevitably scalarized.
200static bool isValidElementType(Type *Ty) {
201 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
202 !Ty->isPPC_FP128Ty();
203}
204
205/// \returns True if the value is a constant (but not globals/constant
206/// expressions).
207static bool isConstant(Value *V) {
208 return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V);
209}
210
211/// Checks if \p V is one of the vector-like instructions, i.e. undef, an
212/// insertelement/extractelement with constant indices on a fixed vector type,
213/// or an extractvalue instruction.
214static bool isVectorLikeInstWithConstOps(Value *V) {
215 if (!isa<InsertElementInst, ExtractElementInst>(V) &&
216 !isa<ExtractValueInst, UndefValue>(V))
217 return false;
218 auto *I = dyn_cast<Instruction>(V);
219 if (!I || isa<ExtractValueInst>(I))
220 return true;
221 if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
222 return false;
223 if (isa<ExtractElementInst>(I))
224 return isConstant(I->getOperand(1));
225 assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
226 return isConstant(I->getOperand(2));
227}
228
229/// \returns true if all of the instructions in \p VL are in the same block or
230/// false otherwise.
231static bool allSameBlock(ArrayRef<Value *> VL) {
232 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
233 if (!I0)
234 return false;
235 if (all_of(VL, isVectorLikeInstWithConstOps))
236 return true;
237
238 BasicBlock *BB = I0->getParent();
239 for (int I = 1, E = VL.size(); I < E; I++) {
240 auto *II = dyn_cast<Instruction>(VL[I]);
241 if (!II)
242 return false;
243
244 if (BB != II->getParent())
245 return false;
246 }
247 return true;
248}
249
250/// \returns True if all of the values in \p VL are constants (but not
251/// globals/constant expressions).
252static bool allConstant(ArrayRef<Value *> VL) {
253 // Constant expressions and globals can't be vectorized like normal integer/FP
254 // constants.
255 return all_of(VL, isConstant);
256}
257
258/// \returns True if all of the values in \p VL are identical or some of them
259/// are UndefValue.
260static bool isSplat(ArrayRef<Value *> VL) {
261 Value *FirstNonUndef = nullptr;
262 for (Value *V : VL) {
263 if (isa<UndefValue>(V))
264 continue;
265 if (!FirstNonUndef) {
266 FirstNonUndef = V;
267 continue;
268 }
269 if (V != FirstNonUndef)
270 return false;
271 }
272 return FirstNonUndef != nullptr;
273}
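// A worked illustration of isSplat (hypothetical SSA values %a, %b):
//   {%a, undef, %a, %a} -> true: undefs are compatible with any splat value.
//   {undef, undef}      -> false: there is no non-undef element to splat, so
//                          FirstNonUndef stays null.
//   {%a, %b, %a, %a}    -> false: two distinct non-undef values.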
274
275/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
276static bool isCommutative(Instruction *I) {
277 if (auto *Cmp = dyn_cast<CmpInst>(I))
278 return Cmp->isCommutative();
279 if (auto *BO = dyn_cast<BinaryOperator>(I))
280 return BO->isCommutative();
281 // TODO: This should check for generic Instruction::isCommutative(), but
282 // we need to confirm that the caller code correctly handles Intrinsics
283 // for example (does not have 2 operands).
284 return false;
285}
286
287/// \returns the inserting index of an InsertElement or InsertValue
288/// instruction, using \p Offset as the base offset for the index.
289static std::optional<unsigned> getInsertIndex(const Value *InsertInst,
290 unsigned Offset = 0) {
291 int Index = Offset;
292 if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
293 const auto *VT = dyn_cast<FixedVectorType>(IE->getType());
294 if (!VT)
295 return std::nullopt;
296 const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
297 if (!CI)
298 return std::nullopt;
299 if (CI->getValue().uge(VT->getNumElements()))
300 return std::nullopt;
301 Index *= VT->getNumElements();
302 Index += CI->getZExtValue();
303 return Index;
304 }
305
306 const auto *IV = cast<InsertValueInst>(InsertInst);
307 Type *CurrentType = IV->getType();
308 for (unsigned I : IV->indices()) {
309 if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
310 Index *= ST->getNumElements();
311 CurrentType = ST->getElementType(I);
312 } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
313 Index *= AT->getNumElements();
314 CurrentType = AT->getElementType();
315 } else {
316 return std::nullopt;
317 }
318 Index += I;
319 }
320 return Index;
321}
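// A worked illustration of the insertvalue path above (hypothetical types):
// for an aggregate of type {[2 x i32], [2 x i32]} and indices {1, 0}, the
// flattened index is Index = 0 * 2 + 1 = 1 at the struct level, then
// Index = 1 * 2 + 0 = 2 at the array level, i.e. the third scalar slot.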
322
323namespace {
324/// Specifies the way the mask should be analyzed for undefs/poisonous elements
325/// in the shuffle mask.
326enum class UseMask {
327 FirstArg, ///< The mask is expected to be for permutation of 1-2 vectors,
328 ///< check for the mask elements for the first argument (mask
329 ///< indices are in range [0:VF)).
330 SecondArg, ///< The mask is expected to be for permutation of 2 vectors, check
331 ///< for the mask elements for the second argument (mask indices
332 ///< are in range [VF:2*VF))
333 UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for
334 ///< future shuffle elements and mark them as used. Non-undef
335 ///< elements are considered unused since they are already
336 ///< marked as used in the mask.
337};
338} // namespace
339
340/// Prepares a use bitset for the given mask either for the first argument or
341/// for the second.
342static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask,
343 UseMask MaskArg) {
344 SmallBitVector UseMask(VF, true);
345 for (auto P : enumerate(Mask)) {
346 if (P.value() == UndefMaskElem) {
347 if (MaskArg == UseMask::UndefsAsMask)
348 UseMask.reset(P.index());
349 continue;
350 }
351 if (MaskArg == UseMask::FirstArg && P.value() < VF)
352 UseMask.reset(P.value());
353 else if (MaskArg == UseMask::SecondArg && P.value() >= VF)
354 UseMask.reset(P.value() - VF);
355 }
356 return UseMask;
357}
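// A worked illustration of buildUseMask with VF = 4 and Mask = {0, 5, -1, 3}
// (hypothetical values; UseMask starts as all-ones {1,1,1,1}):
//   FirstArg:     values 0 and 3 are < VF, so bits 0 and 3 are cleared,
//                 leaving {0,1,1,0}.
//   SecondArg:    value 5 is >= VF and maps to lane 5 - 4 = 1 of the second
//                 vector, leaving {1,0,1,1}.
//   UndefsAsMask: only the undef element (-1) at position 2 clears its bit,
//                 leaving {1,1,0,1}.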
358
359/// Checks if the given value is actually an undefined constant vector.
360/// Also, if the \p UseMask is not empty, tries to check if the non-masked
361/// elements actually mask the insertelement buildvector, if any.
362template <bool IsPoisonOnly = false>
363static SmallBitVector isUndefVector(const Value *V,
364 const SmallBitVector &UseMask = {}) {
365 SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true);
366 using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>;
367 if (isa<T>(V))
368 return Res;
369 auto *VecTy = dyn_cast<FixedVectorType>(V->getType());
370 if (!VecTy)
371 return Res.reset();
372 auto *C = dyn_cast<Constant>(V);
373 if (!C) {
374 if (!UseMask.empty()) {
375 const Value *Base = V;
376 while (auto *II = dyn_cast<InsertElementInst>(Base)) {
377 if (isa<T>(II->getOperand(1)))
378 continue;
379 Base = II->getOperand(0);
380 std::optional<unsigned> Idx = getInsertIndex(II);
381 if (!Idx)
382 continue;
383 if (*Idx < UseMask.size() && !UseMask.test(*Idx))
384 Res.reset(*Idx);
385 }
386 // TODO: Add analysis for shuffles here too.
387 if (V == Base) {
388 Res.reset();
389 } else {
390 SmallBitVector SubMask(UseMask.size(), false);
391 Res &= isUndefVector<IsPoisonOnly>(Base, SubMask);
392 }
393 } else {
394 Res.reset();
395 }
396 return Res;
397 }
398 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
399 if (Constant *Elem = C->getAggregateElement(I))
400 if (!isa<T>(Elem) &&
401 (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I))))
402 Res.reset(I);
403 }
404 return Res;
405}
406
407/// Checks if the vector of instructions can be represented as a shuffle, like:
408/// %x0 = extractelement <4 x i8> %x, i32 0
409/// %x3 = extractelement <4 x i8> %x, i32 3
410/// %y1 = extractelement <4 x i8> %y, i32 1
411/// %y2 = extractelement <4 x i8> %y, i32 2
412/// %x0x0 = mul i8 %x0, %x0
413/// %x3x3 = mul i8 %x3, %x3
414/// %y1y1 = mul i8 %y1, %y1
415/// %y2y2 = mul i8 %y2, %y2
416/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
417/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
418/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
419/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
420/// ret <4 x i8> %ins4
421/// can be transformed into:
422/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
423/// i32 6>
424/// %2 = mul <4 x i8> %1, %1
425/// ret <4 x i8> %2
426/// We convert this initially to something like:
427/// %x0 = extractelement <4 x i8> %x, i32 0
428/// %x3 = extractelement <4 x i8> %x, i32 3
429/// %y1 = extractelement <4 x i8> %y, i32 1
430/// %y2 = extractelement <4 x i8> %y, i32 2
431/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
432/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
433/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
434/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
435/// %5 = mul <4 x i8> %4, %4
436/// %6 = extractelement <4 x i8> %5, i32 0
437/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
438/// %7 = extractelement <4 x i8> %5, i32 1
439/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
440/// %8 = extractelement <4 x i8> %5, i32 2
441/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
442/// %9 = extractelement <4 x i8> %5, i32 3
443/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
444/// ret <4 x i8> %ins4
445/// InstCombiner transforms this into a shuffle and vector mul
446/// Mask will return the Shuffle Mask equivalent to the extracted elements.
447/// TODO: Can we split off and reuse the shuffle mask detection from
448/// ShuffleVectorInst/getShuffleCost?
449static std::optional<TargetTransformInfo::ShuffleKind>
450isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
451 const auto *It =
452 find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
453 if (It == VL.end())
454 return std::nullopt;
455 auto *EI0 = cast<ExtractElementInst>(*It);
456 if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
457 return std::nullopt;
458 unsigned Size =
459 cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
460 Value *Vec1 = nullptr;
461 Value *Vec2 = nullptr;
462 enum ShuffleMode { Unknown, Select, Permute };
463 ShuffleMode CommonShuffleMode = Unknown;
464 Mask.assign(VL.size(), UndefMaskElem);
465 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
466 // Undef can be represented as an undef element in a vector.
467 if (isa<UndefValue>(VL[I]))
468 continue;
469 auto *EI = cast<ExtractElementInst>(VL[I]);
470 if (isa<ScalableVectorType>(EI->getVectorOperandType()))
471 return std::nullopt;
472 auto *Vec = EI->getVectorOperand();
473 // We can extractelement from undef or poison vector.
474 if (isUndefVector(Vec).all())
475 continue;
476 // All vector operands must have the same number of vector elements.
477 if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
478 return std::nullopt;
479 if (isa<UndefValue>(EI->getIndexOperand()))
480 continue;
481 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
482 if (!Idx)
483 return std::nullopt;
484 // Undefined behavior if Idx is negative or >= Size.
485 if (Idx->getValue().uge(Size))
486 continue;
487 unsigned IntIdx = Idx->getValue().getZExtValue();
488 Mask[I] = IntIdx;
489 // For correct shuffling we have to have at most 2 different vector operands
490 // in all extractelement instructions.
491 if (!Vec1 || Vec1 == Vec) {
492 Vec1 = Vec;
493 } else if (!Vec2 || Vec2 == Vec) {
494 Vec2 = Vec;
495 Mask[I] += Size;
496 } else {
497 return std::nullopt;
498 }
499 if (CommonShuffleMode == Permute)
500 continue;
501 // If the extract index is not the same as the operation number, it is a
502 // permutation.
503 if (IntIdx != I) {
504 CommonShuffleMode = Permute;
505 continue;
506 }
507 CommonShuffleMode = Select;
508 }
509 // If we're not crossing lanes in different vectors, consider it as blending.
510 if (CommonShuffleMode == Select && Vec2)
511 return TargetTransformInfo::SK_Select;
512 // If Vec2 was never used, we have a permutation of a single vector, otherwise
513 // we have permutation of 2 vectors.
514 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
515 : TargetTransformInfo::SK_PermuteSingleSrc;
516}
517
518/// \returns True if Extract{Value,Element} instruction extracts element Idx.
519static std::optional<unsigned> getExtractIndex(Instruction *E) {
520 unsigned Opcode = E->getOpcode();
521 assert((Opcode == Instruction::ExtractElement ||
522         Opcode == Instruction::ExtractValue) &&
523        "Expected extractelement or extractvalue instruction.");
524 if (Opcode == Instruction::ExtractElement) {
525 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
526 if (!CI)
527 return std::nullopt;
528 return CI->getZExtValue();
529 }
530 auto *EI = cast<ExtractValueInst>(E);
531 if (EI->getNumIndices() != 1)
532 return std::nullopt;
533 return *EI->idx_begin();
534}
535
536namespace {
537
538/// Main data required for vectorization of instructions.
539struct InstructionsState {
540 /// The very first instruction in the list with the main opcode.
541 Value *OpValue = nullptr;
542
543 /// The main/alternate instruction.
544 Instruction *MainOp = nullptr;
545 Instruction *AltOp = nullptr;
546
547 /// The main/alternate opcodes for the list of instructions.
548 unsigned getOpcode() const {
549 return MainOp ? MainOp->getOpcode() : 0;
550 }
551
552 unsigned getAltOpcode() const {
553 return AltOp ? AltOp->getOpcode() : 0;
554 }
555
556 /// Some of the instructions in the list have alternate opcodes.
557 bool isAltShuffle() const { return AltOp != MainOp; }
558
559 bool isOpcodeOrAlt(Instruction *I) const {
560 unsigned CheckedOpcode = I->getOpcode();
561 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
562 }
563
564 InstructionsState() = delete;
565 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
566 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
567};
568
569} // end anonymous namespace
570
571/// Chooses the correct key for scheduling data. If \p Op has the same (or
572/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
573/// OpValue.
574static Value *isOneOf(const InstructionsState &S, Value *Op) {
575 auto *I = dyn_cast<Instruction>(Op);
576 if (I && S.isOpcodeOrAlt(I))
577 return Op;
578 return S.OpValue;
579}
580
581/// \returns true if \p Opcode is allowed as part of the main/alternate
582/// instruction for SLP vectorization.
583///
584/// Example of unsupported opcode is SDIV that can potentially cause UB if the
585/// "shuffled out" lane would result in division by zero.
586static bool isValidForAlternation(unsigned Opcode) {
587 if (Instruction::isIntDivRem(Opcode))
588 return false;
589
590 return true;
591}
592
593static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
594 const TargetLibraryInfo &TLI,
595 unsigned BaseIndex = 0);
596
597/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
598/// compatible instructions or constants, or just some other regular values.
599static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
600 Value *Op1, const TargetLibraryInfo &TLI) {
601 return (isConstant(BaseOp0) && isConstant(Op0)) ||
602 (isConstant(BaseOp1) && isConstant(Op1)) ||
603 (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
604 !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
605 BaseOp0 == Op0 || BaseOp1 == Op1 ||
606 getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() ||
607 getSameOpcode({BaseOp1, Op1}, TLI).getOpcode();
608}
609
610/// \returns true if a compare instruction \p CI has a similar "look" and the
611/// same predicate as \p BaseCI, "as is" or with its operands and predicate
612/// swapped, false otherwise.
613static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI,
614 const TargetLibraryInfo &TLI) {
615 assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() &&
616        "Assessing comparisons of different types?");
617 CmpInst::Predicate BasePred = BaseCI->getPredicate();
618 CmpInst::Predicate Pred = CI->getPredicate();
619 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred);
620
621 Value *BaseOp0 = BaseCI->getOperand(0);
622 Value *BaseOp1 = BaseCI->getOperand(1);
623 Value *Op0 = CI->getOperand(0);
624 Value *Op1 = CI->getOperand(1);
625
626 return (BasePred == Pred &&
627 areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) ||
628 (BasePred == SwappedPred &&
629 areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI));
630}
631
632/// \returns analysis of the Instructions in \p VL described in
633/// InstructionsState: the Opcode with which we suppose the whole list
634/// could be vectorized, even if its structure is diverse.
635static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
636 const TargetLibraryInfo &TLI,
637 unsigned BaseIndex) {
638 // Make sure these are all Instructions.
639 if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
640 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
641
642 bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
643 bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
644 bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
645 CmpInst::Predicate BasePred =
646 IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
647 : CmpInst::BAD_ICMP_PREDICATE;
648 unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
649 unsigned AltOpcode = Opcode;
650 unsigned AltIndex = BaseIndex;
651
652 // Check for one alternate opcode from another BinaryOperator.
653 // TODO - generalize to support all operators (types, calls etc.).
654 auto *IBase = cast<Instruction>(VL[BaseIndex]);
655 Intrinsic::ID BaseID = 0;
656 SmallVector<VFInfo> BaseMappings;
657 if (auto *CallBase = dyn_cast<CallInst>(IBase)) {
658 BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI);
659 BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase);
660 if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty())
661 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
662 }
663 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
664 auto *I = cast<Instruction>(VL[Cnt]);
665 unsigned InstOpcode = I->getOpcode();
666 if (IsBinOp && isa<BinaryOperator>(I)) {
667 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
668 continue;
669 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
670 isValidForAlternation(Opcode)) {
671 AltOpcode = InstOpcode;
672 AltIndex = Cnt;
673 continue;
674 }
675 } else if (IsCastOp && isa<CastInst>(I)) {
676 Value *Op0 = IBase->getOperand(0);
677 Type *Ty0 = Op0->getType();
678 Value *Op1 = I->getOperand(0);
679 Type *Ty1 = Op1->getType();
680 if (Ty0 == Ty1) {
681 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
682 continue;
683 if (Opcode == AltOpcode) {
684 assert(isValidForAlternation(Opcode) &&
685        isValidForAlternation(InstOpcode) &&
686        "Cast isn't safe for alternation, logic needs to be updated!");
687 AltOpcode = InstOpcode;
688 AltIndex = Cnt;
689 continue;
690 }
691 }
692 } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) {
693 auto *BaseInst = cast<CmpInst>(VL[BaseIndex]);
694 Type *Ty0 = BaseInst->getOperand(0)->getType();
695 Type *Ty1 = Inst->getOperand(0)->getType();
696 if (Ty0 == Ty1) {
697 assert(InstOpcode == Opcode && "Expected same CmpInst opcode.");
698 // Check for compatible operands. If the corresponding operands are not
699 // compatible - need to perform alternate vectorization.
700 CmpInst::Predicate CurrentPred = Inst->getPredicate();
701 CmpInst::Predicate SwappedCurrentPred =
702 CmpInst::getSwappedPredicate(CurrentPred);
703
704 if (E == 2 &&
705 (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
706 continue;
707
708 if (isCmpSameOrSwapped(BaseInst, Inst, TLI))
709 continue;
710 auto *AltInst = cast<CmpInst>(VL[AltIndex]);
711 if (AltIndex != BaseIndex) {
712 if (isCmpSameOrSwapped(AltInst, Inst, TLI))
713 continue;
714 } else if (BasePred != CurrentPred) {
715 assert(
716     isValidForAlternation(InstOpcode) &&
717     "CmpInst isn't safe for alternation, logic needs to be updated!");
718 AltIndex = Cnt;
719 continue;
720 }
721 CmpInst::Predicate AltPred = AltInst->getPredicate();
722 if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
723 AltPred == CurrentPred || AltPred == SwappedCurrentPred)
724 continue;
725 }
726 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) {
727 if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
728 if (Gep->getNumOperands() != 2 ||
729 Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType())
730 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
731 } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) {
732 if (!isVectorLikeInstWithConstOps(EI))
733 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
734 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
735 auto *BaseLI = cast<LoadInst>(IBase);
736 if (!LI->isSimple() || !BaseLI->isSimple())
737 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
738 } else if (auto *Call = dyn_cast<CallInst>(I)) {
739 auto *CallBase = cast<CallInst>(IBase);
740 if (Call->getCalledFunction() != CallBase->getCalledFunction())
741 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
742 if (Call->hasOperandBundles() &&
743 !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(),
744 Call->op_begin() + Call->getBundleOperandsEndIndex(),
745 CallBase->op_begin() +
746 CallBase->getBundleOperandsStartIndex()))
747 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
748 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI);
749 if (ID != BaseID)
750 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
751 if (!ID) {
752 SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call);
753 if (Mappings.size() != BaseMappings.size() ||
754 Mappings.front().ISA != BaseMappings.front().ISA ||
755 Mappings.front().ScalarName != BaseMappings.front().ScalarName ||
756 Mappings.front().VectorName != BaseMappings.front().VectorName ||
757 Mappings.front().Shape.VF != BaseMappings.front().Shape.VF ||
758 Mappings.front().Shape.Parameters !=
759 BaseMappings.front().Shape.Parameters)
760 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
761 }
762 }
763 continue;
764 }
765 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
766 }
767
768 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
769 cast<Instruction>(VL[AltIndex]));
770}
771
772/// \returns true if all of the values in \p VL have the same type or false
773/// otherwise.
774static bool allSameType(ArrayRef<Value *> VL) {
775 Type *Ty = VL[0]->getType();
776 for (int i = 1, e = VL.size(); i < e; i++)
777 if (VL[i]->getType() != Ty)
778 return false;
779
780 return true;
781}
782
783/// \returns True if an in-tree use also needs an extract. This refers to a
784/// possible scalar operand in a vectorized instruction.
785static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
786 TargetLibraryInfo *TLI) {
787 unsigned Opcode = UserInst->getOpcode();
788 switch (Opcode) {
789 case Instruction::Load: {
790 LoadInst *LI = cast<LoadInst>(UserInst);
791 return (LI->getPointerOperand() == Scalar);
792 }
793 case Instruction::Store: {
794 StoreInst *SI = cast<StoreInst>(UserInst);
795 return (SI->getPointerOperand() == Scalar);
796 }
797 case Instruction::Call: {
798 CallInst *CI = cast<CallInst>(UserInst);
799 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
800 for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
801 if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
802 return (CI->getArgOperand(i) == Scalar);
803 }
804 [[fallthrough]];
805 }
806 default:
807 return false;
808 }
809}
810
811/// \returns the AA location that is being accessed by the instruction.
812static MemoryLocation getLocation(Instruction *I) {
813 if (StoreInst *SI = dyn_cast<StoreInst>(I))
814 return MemoryLocation::get(SI);
815 if (LoadInst *LI = dyn_cast<LoadInst>(I))
816 return MemoryLocation::get(LI);
817 return MemoryLocation();
818}
819
820/// \returns True if the instruction is not a volatile or atomic load/store.
821static bool isSimple(Instruction *I) {
822 if (LoadInst *LI = dyn_cast<LoadInst>(I))
823 return LI->isSimple();
824 if (StoreInst *SI = dyn_cast<StoreInst>(I))
825 return SI->isSimple();
826 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
827 return !MI->isVolatile();
828 return true;
829}
830
831/// Shuffles \p Mask in accordance with the given \p SubMask.
832static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
833 if (SubMask.empty())
834 return;
835 if (Mask.empty()) {
836 Mask.append(SubMask.begin(), SubMask.end());
837 return;
838 }
839 SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
840 int TermValue = std::min(Mask.size(), SubMask.size());
841 for (int I = 0, E = SubMask.size(); I < E; ++I) {
842 if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
843 Mask[SubMask[I]] >= TermValue)
844 continue;
845 NewMask[I] = Mask[SubMask[I]];
846 }
847 Mask.swap(NewMask);
848}
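// A worked illustration of addMask (hypothetical masks): with
// Mask = {1, 0, 3, 2} and SubMask = {2, 0, 3, 1}, each SubMask element picks
// a position in the existing Mask, so the composed mask becomes
// {Mask[2], Mask[0], Mask[3], Mask[1]} = {3, 1, 2, 0}; undef (-1) and
// out-of-bounds elements are left as undef.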
849
850/// Order may have elements assigned a special value (size) which is out of
851/// bounds. Such indices only appear in places which correspond to undef values
852/// (see canReuseExtract for details) and are used to keep undef values from
853/// affecting operand ordering.
854/// The first loop below simply finds all unused indices and then the next loop
855/// nest assigns these indices to the undef value positions.
856/// As an example below Order has two undef positions and they have assigned
857/// values 3 and 7 respectively:
858/// before: 6 9 5 4 9 2 1 0
859/// after: 6 3 5 4 7 2 1 0
860static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
861 const unsigned Sz = Order.size();
862 SmallBitVector UnusedIndices(Sz, /*t=*/true);
863 SmallBitVector MaskedIndices(Sz);
864 for (unsigned I = 0; I < Sz; ++I) {
865 if (Order[I] < Sz)
866 UnusedIndices.reset(Order[I]);
867 else
868 MaskedIndices.set(I);
869 }
870 if (MaskedIndices.none())
871 return;
872 assert(UnusedIndices.count() == MaskedIndices.count() &&
873        "Non-synced masked/available indices.");
874 int Idx = UnusedIndices.find_first();
875 int MIdx = MaskedIndices.find_first();
876 while (MIdx >= 0) {
877 assert(Idx >= 0 && "Indices must be synced.");
878 Order[MIdx] = Idx;
879 Idx = UnusedIndices.find_next(Idx);
880 MIdx = MaskedIndices.find_next(MIdx);
881 }
882}
883
884namespace llvm {
885
886static void inversePermutation(ArrayRef<unsigned> Indices,
887 SmallVectorImpl<int> &Mask) {
888 Mask.clear();
889 const unsigned E = Indices.size();
890 Mask.resize(E, UndefMaskElem);
891 for (unsigned I = 0; I < E; ++I)
892 Mask[Indices[I]] = I;
893}
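// A worked illustration of inversePermutation (hypothetical indices): for
// Indices = {2, 0, 1}, meaning element I comes from position Indices[I], the
// assignments Mask[2] = 0, Mask[0] = 1, Mask[1] = 2 produce the inverse
// Mask = {1, 2, 0}.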
894
895/// Reorders the list of scalars in accordance with the given \p Mask.
896static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
897 ArrayRef<int> Mask) {
898 assert(!Mask.empty() && "Expected non-empty mask.");
899 SmallVector<Value *> Prev(Scalars.size(),
900 UndefValue::get(Scalars.front()->getType()));
901 Prev.swap(Scalars);
902 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
903 if (Mask[I] != UndefMaskElem)
904 Scalars[Mask[I]] = Prev[I];
905}
906
907/// Checks if the provided value does not require scheduling. It does not
908/// require scheduling if this is not an instruction, or it is an instruction
909/// that does not read/write memory and all of its operands are either not
910/// instructions, or are phi nodes or instructions from different blocks.
911static bool areAllOperandsNonInsts(Value *V) {
912 auto *I = dyn_cast<Instruction>(V);
913 if (!I)
914 return true;
915 return !mayHaveNonDefUseDependency(*I) &&
916 all_of(I->operands(), [I](Value *V) {
917 auto *IO = dyn_cast<Instruction>(V);
918 if (!IO)
919 return true;
920 return isa<PHINode>(IO) || IO->getParent() != I->getParent();
921 });
922}
923
924/// Checks if the provided value does not require scheduling. It does not
925/// require scheduling if this is not an instruction, or it is an instruction
926/// that does not read/write memory and all of its users are phi nodes or
927/// instructions from different blocks.
928static bool isUsedOutsideBlock(Value *V) {
929 auto *I = dyn_cast<Instruction>(V);
930 if (!I)
931 return true;
932 // Limits the number of uses to save compile time.
933 constexpr int UsesLimit = 8;
934 return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
935 all_of(I->users(), [I](User *U) {
936 auto *IU = dyn_cast<Instruction>(U);
937 if (!IU)
938 return true;
939 return IU->getParent() != I->getParent() || isa<PHINode>(IU);
940 });
941}
942
943/// Checks if the specified value does not require scheduling. It does not
944/// require scheduling if all operands and all users do not need to be scheduled
945/// in the current basic block.
946static bool doesNotNeedToBeScheduled(Value *V) {
947 return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
948}
949
950/// Checks if the specified array of instructions does not require scheduling.
951/// This is so if all instructions either have operands that do not require
952/// scheduling, or have users that do not require scheduling since they are
953/// phis or in other basic blocks.
954static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
955 return !VL.empty() &&
956 (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
957}
958
959namespace slpvectorizer {
960
961/// Bottom Up SLP Vectorizer.
962class BoUpSLP {
963 struct TreeEntry;
964 struct ScheduleData;
965 class ShuffleInstructionBuilder;
966
967public:
968 using ValueList = SmallVector<Value *, 8>;
969 using InstrList = SmallVector<Instruction *, 16>;
970 using ValueSet = SmallPtrSet<Value *, 16>;
971 using StoreList = SmallVector<StoreInst *, 8>;
972 using ExtraValueToDebugLocsMap =
973 MapVector<Value *, SmallVector<Instruction *, 2>>;
974 using OrdersType = SmallVector<unsigned, 4>;
975
976 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
977 TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
978 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
979 const DataLayout *DL, OptimizationRemarkEmitter *ORE)
980 : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
981 DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
982 CodeMetrics::collectEphemeralValues(F, AC, EphValues);
983 // Use the vector register size specified by the target unless overridden
984 // by a command-line option.
985 // TODO: It would be better to limit the vectorization factor based on
986 // data type rather than just register size. For example, x86 AVX has
987 // 256-bit registers, but it does not support integer operations
988 // at that width (that requires AVX2).
989 if (MaxVectorRegSizeOption.getNumOccurrences())
990 MaxVecRegSize = MaxVectorRegSizeOption;
991 else
992 MaxVecRegSize =
993 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
994 .getFixedValue();
995
996 if (MinVectorRegSizeOption.getNumOccurrences())
997 MinVecRegSize = MinVectorRegSizeOption;
998 else
999 MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
1000 }
1001
1002 /// Vectorize the tree that starts with the elements in \p VL.
1003 /// Returns the vectorized root.
1004 Value *vectorizeTree();
1005
1006 /// Vectorize the tree but with the list of externally used values \p
1007 /// ExternallyUsedValues. Values in this MapVector can be replaced by the
1008 /// generated extractvalue instructions.
1009 Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues,
1010 Instruction *ReductionRoot = nullptr);
1011
1012 /// \returns the cost incurred by unwanted spills and fills, caused by
1013 /// holding live values over call sites.
1014 InstructionCost getSpillCost() const;
1015
1016 /// \returns the vectorization cost of the subtree that starts at \p VL.
1017 /// A negative number means that this is profitable.
1018 InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = std::nullopt);
1019
1020 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
1021 /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
1022 void buildTree(ArrayRef<Value *> Roots,
1023 const SmallDenseSet<Value *> &UserIgnoreLst);
1024
1025 /// Construct a vectorizable tree that starts at \p Roots.
1026 void buildTree(ArrayRef<Value *> Roots);
1027
1028 /// Checks if the very first tree node is going to be vectorized.
1029 bool isVectorizedFirstNode() const {
1030 return !VectorizableTree.empty() &&
1031 VectorizableTree.front()->State == TreeEntry::Vectorize;
1032 }
1033
1034 /// Returns the main instruction for the very first node.
1035 Instruction *getFirstNodeMainOp() const {
1036 assert(!VectorizableTree.empty() && "No tree to get the first node from");
1037 return VectorizableTree.front()->getMainOp();
1038 }
1039
1040 /// Returns whether the root node has in-tree uses.
1041 bool doesRootHaveInTreeUses() const {
1042 return !VectorizableTree.empty() &&
1043 !VectorizableTree.front()->UserTreeIndices.empty();
1044 }
1045
1046 /// Builds external uses of the vectorized scalars, i.e. the list of
1047 /// vectorized scalars to be extracted, their lanes and their scalar users. \p
1048 /// ExternallyUsedValues contains an additional list of external uses to handle
1049 /// vectorization of reductions.
1050 void
1051 buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});
1052
1053 /// Clear the internal data structures that are created by 'buildTree'.
1054 void deleteTree() {
1055 VectorizableTree.clear();
1056 ScalarToTreeEntry.clear();
1057 MustGather.clear();
1058 EntryToLastInstruction.clear();
1059 ExternalUses.clear();
1060 for (auto &Iter : BlocksSchedules) {
1061 BlockScheduling *BS = Iter.second.get();
1062 BS->clear();
1063 }
1064 MinBWs.clear();
1065 InstrElementSize.clear();
1066 UserIgnoreList = nullptr;
1067 }
1068
1069 unsigned getTreeSize() const { return VectorizableTree.size(); }
1070
1071 /// Perform LICM and CSE on the newly generated gather sequences.
1072 void optimizeGatherSequence();
1073
1074 /// Checks if the specified gather tree entry \p TE can be represented as a
1075 /// shuffled vector entry + (possibly) permutation with other gathers. It
1076 /// implements the checks only for possibly ordered scalars (Loads,
1077 /// ExtractElement, ExtractValue), which can be part of the graph.
1078 std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);
1079
1080 /// Sort loads into increasing pointer offsets to allow greater clustering.
1081 std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);
1082
1083 /// Gets reordering data for the given tree entry. If the entry is vectorized
1084 /// - just return ReorderIndices, otherwise check if the scalars can be
1085 /// reordered and return the most optimal order.
1086 /// \param TopToBottom If true, include the order of vectorized stores and
1087 /// insertelement nodes, otherwise skip them.
1088 std::optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);
1089
1090 /// Reorders the current graph to the most profitable order starting from the
1091 /// root node to the leaf nodes. The best order is chosen only from the nodes
1092 /// of the same size (vectorization factor). Smaller nodes are considered
1093 /// parts of a subgraph with a smaller VF and they are reordered independently.
1094 /// We can do this because we still need to extend smaller nodes to the wider
1095 /// VF and we can merge the reordering shuffles with the widening shuffles.
1096 void reorderTopToBottom();
1097
1098 /// Reorders the current graph to the most profitable order starting from
1099 /// leaves to the root. It allows rotating small subgraphs and reducing the
1100 /// number of reshuffles if the leaf nodes use the same order. In this case we
1101 /// can merge the orders and just shuffle the user node instead of shuffling
1102 /// its operands. Plus, even if the leaf nodes have different orders, it allows
1103 /// sinking the reordering in the graph closer to the root node and merging it
1104 /// later during analysis.
1105 void reorderBottomToTop(bool IgnoreReorder = false);
1106
1107 /// \return The vector element size in bits to use when vectorizing the
1108 /// expression tree ending at \p V. If V is a store, the size is the width of
1109 /// the stored value. Otherwise, the size is the width of the largest loaded
1110 /// value reaching V. This method is used by the vectorizer to calculate
1111 /// vectorization factors.
1112 unsigned getVectorElementSize(Value *V);
1113
1114 /// Compute the minimum type sizes required to represent the entries in a
1115 /// vectorizable tree.
1116 void computeMinimumValueSizes();
1117
1118 // \returns maximum vector register size as set by TTI or overridden by cl::opt.
1119 unsigned getMaxVecRegSize() const {
1120 return MaxVecRegSize;
1121 }
1122
1123 // \returns minimum vector register size as set by cl::opt.
1124 unsigned getMinVecRegSize() const {
1125 return MinVecRegSize;
1126 }
1127
1128 unsigned getMinVF(unsigned Sz) const {
1129 return std::max(2U, getMinVecRegSize() / Sz);
1130 }
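 // For example, with the -slp-min-reg-size default of 128 bits,
 // getMinVF(32) yields std::max(2U, 128 / 32) = 4 lanes for 32-bit elements.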
1131
1132 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
1133 unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
1134 MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
1135 return MaxVF ? MaxVF : UINT_MAX;
1136 }
1137
1138 /// Check if homogeneous aggregate is isomorphic to some VectorType.
1139 /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
1140 /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
1141 /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
1142 ///
1143 /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
1144 unsigned canMapToVector(Type *T, const DataLayout &DL) const;
1145
1146 /// \returns True if the VectorizableTree is both tiny and not fully
1147 /// vectorizable. We do not vectorize such trees.
1148 bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;
1149
1150 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
1151 /// can be load combined in the backend. Load combining may not be allowed in
1152 /// the IR optimizer, so we do not want to alter the pattern. For example,
1153 /// partially transforming a scalar bswap() pattern into vector code is
1154 /// effectively impossible for the backend to undo.
1155 /// TODO: If load combining is allowed in the IR optimizer, this analysis
1156 /// may not be necessary.
1157 bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;
1158
1159 /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
1160 /// can be load combined in the backend. Load combining may not be allowed in
1161 /// the IR optimizer, so we do not want to alter the pattern. For example,
1162 /// partially transforming a scalar bswap() pattern into vector code is
1163 /// effectively impossible for the backend to undo.
1164 /// TODO: If load combining is allowed in the IR optimizer, this analysis
1165 /// may not be necessary.
1166 bool isLoadCombineCandidate() const;
1167
1168 OptimizationRemarkEmitter *getORE() { return ORE; }
1169
1170 /// This structure holds any data we need about the edges being traversed
1171 /// during buildTree_rec(). We keep track of:
1172 /// (i) the user TreeEntry index, and
1173 /// (ii) the index of the edge.
1174 struct EdgeInfo {
1175 EdgeInfo() = default;
1176 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
1177 : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
1178 /// The user TreeEntry.
1179 TreeEntry *UserTE = nullptr;
1180 /// The operand index of the use.
1181 unsigned EdgeIdx = UINT_MAX;
1182#ifndef NDEBUG
1183 friend inline raw_ostream &operator<<(raw_ostream &OS,
1184 const BoUpSLP::EdgeInfo &EI) {
1185 EI.dump(OS);
1186 return OS;
1187 }
1188 /// Debug print.
1189 void dump(raw_ostream &OS) const {
1190 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
1191 << " EdgeIdx:" << EdgeIdx << "}";
1192 }
1193 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
1194#endif
1195 };
1196
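// A usage sketch (illustrative; TE and OpIdx stand for the user tree entry
// and its operand index): the root of the tree is built with a default
// EdgeInfo, while operand nodes record the edge they hang off of, e.g.:
//   buildTree_rec(Roots, /*Depth=*/0, EdgeInfo());
//   buildTree_rec(Operands, Depth + 1, EdgeInfo(TE, OpIdx));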
1197 /// A helper class used for scoring candidates for two consecutive lanes.
1198 class LookAheadHeuristics {
1199 const TargetLibraryInfo &TLI;
1200 const DataLayout &DL;
1201 ScalarEvolution &SE;
1202 const BoUpSLP &R;
1203 int NumLanes; // Total number of lanes (aka vectorization factor).
1204 int MaxLevel; // The maximum recursion depth for accumulating score.
1205
1206 public:
1207 LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
1208 ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
1209 int MaxLevel)
1210 : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
1211 MaxLevel(MaxLevel) {}
1212
1213 // The hard-coded scores listed here are not very important, though better
1214 // matches should receive higher scores to improve the resulting cost. When
1215 // computing the scores of matching one sub-tree with another, we are
1216 // basically counting the number of values that are matching. So even if all
1217 // scores are set to 1, we would still get a decent matching result.
1218 // However, sometimes we have to break ties. For example we may have to
1219 // choose between matching loads vs matching opcodes. This is what these
1220 // scores are helping us with: they provide the order of preference. Also,
1221 // this is important if the scalar is externally used or used in another
1222 // tree entry node in the different lane.
1223
1224 /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
1225 static const int ScoreConsecutiveLoads = 4;
1226 /// The same load multiple times. This should have a better score than
1227 /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
1228 /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5 for
1229 /// a vector load and 1.0 for a broadcast.
1230 static const int ScoreSplatLoads = 3;
1231 /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
1232 static const int ScoreReversedLoads = 3;
1233 /// A load candidate for masked gather.
1234 static const int ScoreMaskedGatherCandidate = 1;
1235 /// ExtractElementInst from same vector and consecutive indexes.
1236 static const int ScoreConsecutiveExtracts = 4;
1237 /// ExtractElementInst from same vector and reversed indices.
1238 static const int ScoreReversedExtracts = 3;
1239 /// Constants.
1240 static const int ScoreConstants = 2;
1241 /// Instructions with the same opcode.
1242 static const int ScoreSameOpcode = 2;
1244 /// Instructions with alt opcodes (e.g., add + sub).
1244 static const int ScoreAltOpcodes = 1;
1245 /// Identical instructions (a.k.a. splat or broadcast).
1246 static const int ScoreSplat = 1;
1247 /// Matching with an undef is preferable to failing.
1248 static const int ScoreUndef = 1;
1249 /// Score for failing to find a decent match.
1250 static const int ScoreFail = 0;
1251 /// Score if all users are vectorized.
1252 static const int ScoreAllUserVectorized = 1;
1253
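// To make the preference order above concrete: a pair of consecutive loads
// (score 4) beats a pair of instructions that merely share an opcode
// (score 2), which in turn beats an alternate-opcode pair (score 1), and
// any match is preferable to ScoreFail (0).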
1254 /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
1255 /// \p U1 and \p U2 are the users of \p V1 and \p V2.
1256 /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
1257 /// MainAltOps.
1258 int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
1259 ArrayRef<Value *> MainAltOps) const {
1260 if (!isValidElementType(V1->getType()) ||
1261 !isValidElementType(V2->getType()))
1262 return LookAheadHeuristics::ScoreFail;
1263
1264 if (V1 == V2) {
1265 if (isa<LoadInst>(V1)) {
1266 // Returns true if the users of V1 and V2 won't need to be extracted.
1267 auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
1268 // Bail out if we have too many uses to save compilation time.
1269 static constexpr unsigned Limit = 8;
1270 if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
1271 return false;
1272
1273 auto AllUsersVectorized = [U1, U2, this](Value *V) {
1274 return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
1275 return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
1276 });
1277 };
1278 return AllUsersVectorized(V1) && AllUsersVectorized(V2);
1279 };
1280 // A broadcast of a load can be cheaper on some targets.
1281 if (R.TTI->isLegalBroadcastLoad(V1->getType(),
1282 ElementCount::getFixed(NumLanes)) &&
1283 ((int)V1->getNumUses() == NumLanes ||
1284 AllUsersAreInternal(V1, V2)))
1285 return LookAheadHeuristics::ScoreSplatLoads;
1286 }
1287 return LookAheadHeuristics::ScoreSplat;
1288 }
1289
1290 auto *LI1 = dyn_cast<LoadInst>(V1);
1291 auto *LI2 = dyn_cast<LoadInst>(V2);
1292 if (LI1 && LI2) {
1293 if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
1294 !LI2->isSimple())
1295 return LookAheadHeuristics::ScoreFail;
1296
1297 std::optional<int> Dist = getPointersDiff(
1298 LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
1299 LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
1300 if (!Dist || *Dist == 0) {
1301 if (getUnderlyingObject(LI1->getPointerOperand()) ==
1302 getUnderlyingObject(LI2->getPointerOperand()) &&
1303 R.TTI->isLegalMaskedGather(
1304 FixedVectorType::get(LI1->getType(), NumLanes),
1305 LI1->getAlign()))
1306 return LookAheadHeuristics::ScoreMaskedGatherCandidate;
1307 return LookAheadHeuristics::ScoreFail;
1308 }
1309 // The distance is too large - still may be profitable to use masked
1310 // loads/gathers.
1311 if (std::abs(*Dist) > NumLanes / 2)
1312 return LookAheadHeuristics::ScoreMaskedGatherCandidate;
1313 // This still will detect consecutive loads, but we might have "holes"
1314 // in some cases. It is ok for non-power-2 vectorization and may produce
1315 // better results. It should not affect current vectorization.
1316 return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
1317 : LookAheadHeuristics::ScoreReversedLoads;
1318 }
1319
1320 auto *C1 = dyn_cast<Constant>(V1);
1321 auto *C2 = dyn_cast<Constant>(V2);
1322 if (C1 && C2)
1323 return LookAheadHeuristics::ScoreConstants;
1324
1325 // Extracts from consecutive indexes of the same vector get a better score, as
1326 // the extracts could be optimized away.
1327 Value *EV1;
1328 ConstantInt *Ex1Idx;
1329 if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
1330 // Undefs are always profitable for extractelements.
1331 if (isa<UndefValue>(V2))
1332 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1333 Value *EV2 = nullptr;
1334 ConstantInt *Ex2Idx = nullptr;
1335 if (match(V2,
1336 m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
1337 m_Undef())))) {
1338 // Undefs are always profitable for extractelements.
1339 if (!Ex2Idx)
1340 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1341 if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
1342 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1343 if (EV2 == EV1) {
1344 int Idx1 = Ex1Idx->getZExtValue();
1345 int Idx2 = Ex2Idx->getZExtValue();
1346 int Dist = Idx2 - Idx1;
1347 // The distance is too large - still may be profitable to use
1348 // shuffles.
1349 if (std::abs(Dist) == 0)
1350 return LookAheadHeuristics::ScoreSplat;
1351 if (std::abs(Dist) > NumLanes / 2)
1352 return LookAheadHeuristics::ScoreSameOpcode;
1353 return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
1354 : LookAheadHeuristics::ScoreReversedExtracts;
1355 }
1356 return LookAheadHeuristics::ScoreAltOpcodes;
1357 }
1358 return LookAheadHeuristics::ScoreFail;
1359 }
1360
1361 auto *I1 = dyn_cast<Instruction>(V1);
1362 auto *I2 = dyn_cast<Instruction>(V2);
1363 if (I1 && I2) {
1364 if (I1->getParent() != I2->getParent())
1365 return LookAheadHeuristics::ScoreFail;
1366 SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
1367 Ops.push_back(I1);
1368 Ops.push_back(I2);
1369 InstructionsState S = getSameOpcode(Ops, TLI);
1370 // Note: Only consider instructions with <= 2 operands to avoid
1371 // complexity explosion.
1372 if (S.getOpcode() &&
1373 (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
1374 !S.isAltShuffle()) &&
1375 all_of(Ops, [&S](Value *V) {
1376 return cast<Instruction>(V)->getNumOperands() ==
1377 S.MainOp->getNumOperands();
1378 }))
1379 return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
1380 : LookAheadHeuristics::ScoreSameOpcode;
1381 }
1382
1383 if (isa<UndefValue>(V2))
1384 return LookAheadHeuristics::ScoreUndef;
1385
1386 return LookAheadHeuristics::ScoreFail;
1387 }
1388
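// A worked example of the shallow scoring above: for two simple loads from
// the same basic block with addresses A[i] and A[i+1], getPointersDiff()
// returns 1, so the pair scores ScoreConsecutiveLoads (4); for A[i+1] and
// A[i] the distance is -1, giving ScoreReversedLoads (3). If the pointers
// cannot be compared, the pair may still score ScoreMaskedGatherCandidate
// (1) when a legal masked gather is available, and ScoreFail (0) otherwise.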
1389 /// Go through the operands of \p LHS and \p RHS recursively until
1390 /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
1391 /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
1392 /// of \p U1 and \p U2), except at the beginning of the recursion where
1393 /// these are set to nullptr.
1394 ///
1395 /// For example:
1396 /// \verbatim
1397 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1]
1398 /// \ / \ / \ / \ /
1399 /// + + + +
1400 /// G1 G2 G3 G4
1401 /// \endverbatim
1402 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
1403 /// each level recursively, accumulating the score. It starts from matching
1404 /// the additions at level 0, then moves on to the loads (level 1). The
1405 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
1406 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while
1407 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail.
1408 /// Please note that the order of the operands does not matter, as we
1409 /// evaluate the score of all profitable combinations of operands. In
1410 /// other words, the score of G1 and G4 is the same as that of G1 and G2. This
1411 /// heuristic is based on ideas described in:
1412 /// Look-ahead SLP: Auto-vectorization in the presence of commutative
1413 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
1414 /// Luís F. W. Góes
1415 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
1416 Instruction *U2, int CurrLevel,
1417 ArrayRef<Value *> MainAltOps) const {
1418
1419 // Get the shallow score of LHS and RHS.
1420 int ShallowScoreAtThisLevel =
1421 getShallowScore(LHS, RHS, U1, U2, MainAltOps);
1422
1423 // Return the current score early if we have reached MaxLevel,
1424 // or if LHS and RHS are not instructions,
1425 // or if they are the same instruction (a splat),
1426 // or if matching failed (ScoreFail),
1427 // or if it is already profitable to vectorize the loads or
1428 // extractelements.
1429 auto *I1 = dyn_cast<Instruction>(LHS);
1430 auto *I2 = dyn_cast<Instruction>(RHS);
1431 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
1432 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
1433 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
1434 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
1435 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
1436 ShallowScoreAtThisLevel))
1437 return ShallowScoreAtThisLevel;
1438 assert(I1 && I2 && "Should have early exited.");
1439
1440 // Contains the I2 operand indexes that got matched with I1 operands.
1441 SmallSet<unsigned, 4> Op2Used;
1442
1443 // Recursion towards the operands of I1 and I2. We are trying all possible
1444 // operand pairs, and keeping track of the best score.
1445 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
1446 OpIdx1 != NumOperands1; ++OpIdx1) {
1447 // Try to pair operand OpIdx1 of I1 with the best operand of I2.
1448 int MaxTmpScore = 0;
1449 unsigned MaxOpIdx2 = 0;
1450 bool FoundBest = false;
1451 // If I2 is commutative try all combinations.
1452 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
1453 unsigned ToIdx = isCommutative(I2)
1454 ? I2->getNumOperands()
1455 : std::min(I2->getNumOperands(), OpIdx1 + 1);
1456 assert(FromIdx <= ToIdx && "Bad index");
1457 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
1458 // Skip operands already paired with OpIdx1.
1459 if (Op2Used.count(OpIdx2))
1460 continue;
1461 // Recursively calculate the cost at each level
1462 int TmpScore =
1463 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
1464 I1, I2, CurrLevel + 1, std::nullopt);
1465 // Look for the best score.
1466 if (TmpScore > LookAheadHeuristics::ScoreFail &&
1467 TmpScore > MaxTmpScore) {
1468 MaxTmpScore = TmpScore;
1469 MaxOpIdx2 = OpIdx2;
1470 FoundBest = true;
1471 }
1472 }
1473 if (FoundBest) {
1474 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
1475 Op2Used.insert(MaxOpIdx2);
1476 ShallowScoreAtThisLevel += MaxTmpScore;
1477 }
1478 }
1479 return ShallowScoreAtThisLevel;
1480 }
1481 };
1482 /// A helper data structure to hold the operands of a vector of instructions.
1483 /// This supports a fixed vector length for all operand vectors.
1484 class VLOperands {
1485 /// For each operand we need (i) the value, and (ii) the opcode that it
1486 /// would be attached to if the expression was in a left-linearized form.
1487 /// This is required to avoid illegal operand reordering.
1488 /// For example:
1489 /// \verbatim
1490 /// 0 Op1
1491 /// |/
1492 /// Op1 Op2 Linearized + Op2
1493 /// \ / ----------> |/
1494 /// - -
1495 ///
1496 /// Op1 - Op2 (0 + Op1) - Op2
1497 /// \endverbatim
1498 ///
1499 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
1500 ///
1501 /// Another way to think of this is to track all the operations across the
1502 /// path from the operand all the way to the root of the tree and to
1503 /// calculate the operation that corresponds to this path. For example, the
1504 /// path from Op2 to the root crosses the RHS of the '-', therefore the
1505 /// corresponding operation is a '-' (which matches the one in the
1506 /// linearized tree, as shown above).
1507 ///
1508 /// For lack of a better term, we refer to this operation as Accumulated
1509 /// Path Operation (APO).
1510 struct OperandData {
1511 OperandData() = default;
1512 OperandData(Value *V, bool APO, bool IsUsed)
1513 : V(V), APO(APO), IsUsed(IsUsed) {}
1514 /// The operand value.
1515 Value *V = nullptr;
1516 /// TreeEntries only allow a single opcode, or an alternate sequence of
1517 /// them (e.g., +, -). Therefore, we can safely use a boolean value for the
1518 /// APO. It is set to 'true' if 'V' is attached to an inverse operation
1519 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
1520 /// (e.g., Add/Mul)
1521 bool APO = false;
1522 /// Helper data for the reordering function.
1523 bool IsUsed = false;
1524 };
1525
1526 /// During operand reordering, we are trying to select the operand at lane
1527 /// that matches best with the operand at the neighboring lane. Our
1528 /// selection is based on the type of value we are looking for. For example,
1529 /// if the neighboring lane has a load, we need to look for a load that is
1530 /// accessing a consecutive address. These strategies are summarized in the
1531 /// 'ReorderingMode' enumerator.
1532 enum class ReorderingMode {
1533 Load, ///< Matching loads to consecutive memory addresses
1534 Opcode, ///< Matching instructions based on opcode (same or alternate)
1535 Constant, ///< Matching constants
1536 Splat, ///< Matching the same instruction multiple times (broadcast)
1537 Failed, ///< We failed to create a vectorizable group
1538 };
1539
1540 using OperandDataVec = SmallVector<OperandData, 2>;
1541
1542 /// A vector of operand vectors.
1543 SmallVector<OperandDataVec, 4> OpsVec;
1544
1545 const TargetLibraryInfo &TLI;
1546 const DataLayout &DL;
1547 ScalarEvolution &SE;
1548 const BoUpSLP &R;
1549
1550 /// \returns the operand data at \p OpIdx and \p Lane.
1551 OperandData &getData(unsigned OpIdx, unsigned Lane) {
1552 return OpsVec[OpIdx][Lane];
1553 }
1554
1555 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
1556 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
1557 return OpsVec[OpIdx][Lane];
1558 }
1559
1560 /// Clears the used flag for all entries.
1561 void clearUsed() {
1562 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
1563 OpIdx != NumOperands; ++OpIdx)
1564 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1565 ++Lane)
1566 OpsVec[OpIdx][Lane].IsUsed = false;
1567 }
1568
1569 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
1570 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
1571 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
1572 }
1573
1574 /// \param Lane lane of the operands under analysis.
1575 /// \param OpIdx operand index in lane \p Lane for which we're looking for
1576 /// the best candidate.
1577 /// \param Idx operand index of the current candidate value.
1578 /// \returns The additional score due to possible broadcasting of the
1579 /// elements in the lane. It is more profitable to have a power-of-2 number
1580 /// of unique elements in the lane, as it will be vectorized with higher
1581 /// probability after removing duplicates. Currently the SLP vectorizer
1582 /// supports only vectorization of a power-of-2 number of unique scalars.
1583 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1584 Value *IdxLaneV = getData(Idx, Lane).V;
1585 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
1586 return 0;
1587 SmallPtrSet<Value *, 4> Uniques;
1588 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
1589 if (Ln == Lane)
1590 continue;
1591 Value *OpIdxLnV = getData(OpIdx, Ln).V;
1592 if (!isa<Instruction>(OpIdxLnV))
1593 return 0;
1594 Uniques.insert(OpIdxLnV);
1595 }
1596 int UniquesCount = Uniques.size();
1597 int UniquesCntWithIdxLaneV =
1598 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
1599 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1600 int UniquesCntWithOpIdxLaneV =
1601 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
1602 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
1603 return 0;
1604 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
1605 UniquesCntWithOpIdxLaneV) -
1606 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
1607 }
1608
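// A worked example of the formula above: if including the candidate leaves
// 4 unique values across the lanes (PowerOf2Ceil(4) - 4 = 0 wasted lanes)
// while keeping the current operand leaves 3 (PowerOf2Ceil(3) - 3 = 1
// wasted lane), the score is 1 - 0 = +1 in favor of the candidate, which
// keeps the number of unique scalars at a power of 2.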
1609 /// \param Lane lane of the operands under analysis.
1610 /// \param OpIdx operand index in lane \p Lane for which we're looking for
1611 /// the best candidate.
1612 /// \param Idx operand index of the current candidate value.
1613 /// \returns The additional score for the scalar which users are all
1614 /// vectorized.
1615 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1616 Value *IdxLaneV = getData(Idx, Lane).V;
1617 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1618 // Do not care about number of uses for vector-like instructions
1619 // (extractelement/extractvalue with constant indices), they are extracts
1620 // themselves and already externally used. Vectorization of such
1621 // instructions does not add extra extractelement instruction, just may
1622 // remove it.
1623 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1624 isVectorLikeInstWithConstOps(OpIdxLaneV))
1625 return LookAheadHeuristics::ScoreAllUserVectorized;
1626 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1627 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1628 return 0;
1629 return R.areAllUsersVectorized(IdxLaneI, std::nullopt)
1630 ? LookAheadHeuristics::ScoreAllUserVectorized
1631 : 0;
1632 }
1633
1634 /// Score scaling factor for fully compatible instructions but with
1635 /// different number of external uses. Allows better selection of the
1636 /// instructions with less external uses.
1637 static const int ScoreScaleFactor = 10;
1638
1639 /// \Returns the look-ahead score, which tells us how much the sub-trees
1640 /// rooted at \p LHS and \p RHS match, the more they match the higher the
1641 /// score. This helps break ties in an informed way when we cannot decide on
1642 /// the order of the operands by just considering the immediate
1643 /// predecessors.
1644 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1645 int Lane, unsigned OpIdx, unsigned Idx,
1646 bool &IsUsed) {
1647 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
1648 LookAheadMaxDepth);
1649 // Keep track of the instruction stack as we recurse into the operands
1650 // during the look-ahead score exploration.
1651 int Score =
1652 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
1653 /*CurrLevel=*/1, MainAltOps);
1654 if (Score) {
1655 int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1656 if (Score <= -SplatScore) {
1657 // Set the minimum score for splat-like sequence to avoid setting
1658 // failed state.
1659 Score = 1;
1660 } else {
1661 Score += SplatScore;
1662 // Scale score to see the difference between different operands
1663 // and similar operands but all vectorized/not all vectorized
1664 // uses. It does not affect actual selection of the best
1665 // compatible operand in general, just allows to select the
1666 // operand with all vectorized uses.
1667 Score *= ScoreScaleFactor;
1668 Score += getExternalUseScore(Lane, OpIdx, Idx);
1669 IsUsed = true;
1670 }
1671 }
1672 return Score;
1673 }
1674
1675 /// Best defined scores per lanes between the passes. Used to choose the
1676 /// best operand (with the highest score) between the passes.
1677 /// The key - {Operand Index, Lane}.
1678 /// The value - the best score between the passes for the lane and the
1679 /// operand.
1680 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1681 BestScoresPerLanes;
1682
1683 // Search all operands in Ops[*][Lane] for the one that matches best
1684 // Ops[OpIdx][LastLane] and return its operand index.
1685 // If no good match can be found, return std::nullopt.
1686 std::optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1687 ArrayRef<ReorderingMode> ReorderingModes,
1688 ArrayRef<Value *> MainAltOps) {
1689 unsigned NumOperands = getNumOperands();
1690
1691 // The operand of the previous lane at OpIdx.
1692 Value *OpLastLane = getData(OpIdx, LastLane).V;
1693
1694 // Our strategy mode for OpIdx.
1695 ReorderingMode RMode = ReorderingModes[OpIdx];
1696 if (RMode == ReorderingMode::Failed)
1697 return std::nullopt;
1698
1699 // The linearized opcode of the operand at OpIdx, Lane.
1700 bool OpIdxAPO = getData(OpIdx, Lane).APO;
1701
1702 // The best operand index and its score.
1703 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1704 // are using the score to differentiate between the two.
1705 struct BestOpData {
1706 std::optional<unsigned> Idx;
1707 unsigned Score = 0;
1708 } BestOp;
1709 BestOp.Score =
1710 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
1711 .first->second;
1712
1713 // Track if the operand must be marked as used. If the operand is set to
1714 // Score 1 explicitly (because of non-power-of-2 unique scalars), we may
1715 // want to re-estimate the operands again on the following iterations.
1716 bool IsUsed =
1717 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant;
1718 // Iterate through all unused operands and look for the best.
1719 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
1720 // Get the operand at Idx and Lane.
1721 OperandData &OpData = getData(Idx, Lane);
1722 Value *Op = OpData.V;
1723 bool OpAPO = OpData.APO;
1724
1725 // Skip already selected operands.
1726 if (OpData.IsUsed)
1727 continue;
1728
1729 // Skip if we are trying to move the operand to a position with a
1730 // different opcode in the linearized tree form. This would break the
1731 // semantics.
1732 if (OpAPO != OpIdxAPO)
1733 continue;
1734
1735 // Look for an operand that matches the current mode.
1736 switch (RMode) {
1737 case ReorderingMode::Load:
1738 case ReorderingMode::Constant:
1739 case ReorderingMode::Opcode: {
1740 bool LeftToRight = Lane > LastLane;
1741 Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
1742 Value *OpRight = (LeftToRight) ? Op : OpLastLane;
1743 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
1744 OpIdx, Idx, IsUsed);
1745 if (Score > static_cast<int>(BestOp.Score)) {
1746 BestOp.Idx = Idx;
1747 BestOp.Score = Score;
1748 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
1749 }
1750 break;
1751 }
1752 case ReorderingMode::Splat:
1753 if (Op == OpLastLane)
1754 BestOp.Idx = Idx;
1755 break;
1756 case ReorderingMode::Failed:
1757 llvm_unreachable("Not expected Failed reordering mode.")::llvm::llvm_unreachable_internal("Not expected Failed reordering mode."
, "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp", 1757)
;
1758 }
1759 }
1760
1761 if (BestOp.Idx) {
1762 getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
1763 return BestOp.Idx;
1764 }
1765 // If we could not find a good match return std::nullopt.
1766 return std::nullopt;
1767 }
1768
1769 /// Helper for reorderOperandVecs.
1770 /// \returns the lane that we should start reordering from. This is the one
1771 /// which has the least number of operands that can freely move about, or is
1772 /// less profitable to reorder because it already has the most optimal set of operands.
1773 unsigned getBestLaneToStartReordering() const {
1774 unsigned Min = UINT_MAX;
1775 unsigned SameOpNumber = 0;
1776 // std::pair<unsigned, unsigned> is used to implement a simple voting
1777 // algorithm and choose the lane with the least number of operands that
1778 // can freely move about, or that is less profitable because it already has
1779 // the most optimal set of operands. The first unsigned is a counter for
1780 // voting, the second unsigned is the counter of lanes with instructions
1781 // with same/alternate opcodes and same parent basic block.
1782 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
1783 // Try to be closer to the original results, if we have multiple lanes
1784 // with the same cost. If 2 lanes have the same cost, use the one with the
1785 // lowest index.
1786 for (int I = getNumLanes(); I > 0; --I) {
1787 unsigned Lane = I - 1;
1788 OperandsOrderData NumFreeOpsHash =
1789 getMaxNumOperandsThatCanBeReordered(Lane);
1790 // Compare the number of operands that can move and choose the one with
1791 // the least number.
1792 if (NumFreeOpsHash.NumOfAPOs < Min) {
1793 Min = NumFreeOpsHash.NumOfAPOs;
1794 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1795 HashMap.clear();
1796 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1797 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1798 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
1799 // Select the most optimal lane in terms of number of operands that
1800 // should be moved around.
1801 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1802 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1803 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1804 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
1805 auto It = HashMap.find(NumFreeOpsHash.Hash);
1806 if (It == HashMap.end())
1807 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1808 else
1809 ++It->second.first;
1810 }
1811 }
1812 // Select the lane with the minimum counter.
1813 unsigned BestLane = 0;
1814 unsigned CntMin = UINT_MAX;
1815 for (const auto &Data : reverse(HashMap)) {
1816 if (Data.second.first < CntMin) {
1817 CntMin = Data.second.first;
1818 BestLane = Data.second.second;
1819 }
1820 }
1821 return BestLane;
1822 }
1823
1824 /// Data structure that helps to reorder operands.
1825 struct OperandsOrderData {
1826 /// The best number of operands with the same APOs, which can be
1827 /// reordered.
1828 unsigned NumOfAPOs = UINT_MAX;
1829 /// Number of operands with the same/alternate instruction opcode and
1830 /// parent.
1831 unsigned NumOpsWithSameOpcodeParent = 0;
1832 /// Hash for the actual operands ordering.
1833 /// Used to count operands, actually their position id and opcode
1834 /// value. It is used in the voting mechanism to find the lane with the
1835 /// least number of operands that can freely move about, or that is less
1836 /// profitable because it already has the most optimal set of operands. It
1837 /// could be replaced with a SmallVector<unsigned>, but a hash code is
1838 /// faster and requires less memory.
1839 unsigned Hash = 0;
1840 };
1841 /// \returns the maximum number of operands that are allowed to be reordered
1842 /// for \p Lane and the number of compatible instructions (with the same
1843 /// parent/opcode). This is used as a heuristic for selecting the first lane
1844 /// to start operand reordering.
1845 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1846 unsigned CntTrue = 0;
1847 unsigned NumOperands = getNumOperands();
1848 // Operands with the same APO can be reordered. We therefore need to count
1849 // how many of them we have for each APO, like this: Cnt[APO] = x.
1850 // Since we only have two APOs, namely true and false, we can avoid using
1851 // a map. Instead we can simply count the number of operands that
1852 // correspond to one of them (in this case the 'true' APO), and calculate
1853 // the other by subtracting it from the total number of operands.
1854 // Operands with the same instruction opcode and parent are more
1855 // profitable since we don't need to move them in many cases, with a high
1856 // probability such lane already can be vectorized effectively.
1857 bool AllUndefs = true;
1858 unsigned NumOpsWithSameOpcodeParent = 0;
1859 Instruction *OpcodeI = nullptr;
1860 BasicBlock *Parent = nullptr;
1861 unsigned Hash = 0;
1862 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1863 const OperandData &OpData = getData(OpIdx, Lane);
1864 if (OpData.APO)
1865 ++CntTrue;
1866 // Use Boyer-Moore majority voting for finding the majority opcode and
1867 // the number of times it occurs.
1868 if (auto *I = dyn_cast<Instruction>(OpData.V)) {
1869 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() ||
1870 I->getParent() != Parent) {
1871 if (NumOpsWithSameOpcodeParent == 0) {
1872 NumOpsWithSameOpcodeParent = 1;
1873 OpcodeI = I;
1874 Parent = I->getParent();
1875 } else {
1876 --NumOpsWithSameOpcodeParent;
1877 }
1878 } else {
1879 ++NumOpsWithSameOpcodeParent;
1880 }
1881 }
1882 Hash = hash_combine(
1883 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
1884 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
1885 }
1886 if (AllUndefs)
1887 return {};
1888 OperandsOrderData Data;
1889 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
1890 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
1891 Data.Hash = Hash;
1892 return Data;
1893 }
1894
1895 /// Go through the instructions in VL and append their operands.
1896 void appendOperandsOfVL(ArrayRef<Value *> VL) {
1897 assert(!VL.empty() && "Bad VL");
1898 assert((empty() || VL.size() == getNumLanes()) &&
1899 "Expected same number of lanes");
1900 assert(isa<Instruction>(VL[0]) && "Expected instruction");
1901 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1902 OpsVec.resize(NumOperands);
1903 unsigned NumLanes = VL.size();
1904 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1905 OpsVec[OpIdx].resize(NumLanes);
1906 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1907 assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1908 // Our tree has just 3 nodes: the root and two operands.
1909 // It is therefore trivial to get the APO. We only need to check the
1910 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1911 // RHS operand. The LHS operand of both add and sub is never attached
1912 // to an inverse operation in the linearized form, therefore its APO
1913 // is false. The RHS is true only if VL[Lane] is an inverse operation.
1914
1915 // Since operand reordering is performed on groups of commutative
1916 // operations or alternating sequences (e.g., +, -), we can safely
1917 // tell the inverse operations by checking commutativity.
1918 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1919 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1920 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1921 APO, false};
1922 }
1923 }
1924 }
1925
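// For example, for VL = {(add %a0, %b0), (sub %a1, %b1)} the rules above
// yield:
//   OpsVec[0] = {{a0, APO:false}, {a1, APO:false}} // LHS is never inverse.
//   OpsVec[1] = {{b0, APO:false}, {b1, APO:true}}  // RHS of the
//                                                  // non-commutative sub.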
1926 /// \returns the number of operands.
1927 unsigned getNumOperands() const { return OpsVec.size(); }
1928
1929 /// \returns the number of lanes.
1930 unsigned getNumLanes() const { return OpsVec[0].size(); }
1931
1932 /// \returns the operand value at \p OpIdx and \p Lane.
1933 Value *getValue(unsigned OpIdx, unsigned Lane) const {
1934 return getData(OpIdx, Lane).V;
1935 }
1936
1937 /// \returns true if the data structure is empty.
1938 bool empty() const { return OpsVec.empty(); }
1939
1940 /// Clears the data.
1941 void clear() { OpsVec.clear(); }
1942
1943 /// \Returns true if there are enough operands identical to \p Op to fill
1944 /// the whole vector.
1945 /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow.
1946 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1947 bool OpAPO = getData(OpIdx, Lane).APO;
1948 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1949 if (Ln == Lane)
1950 continue;
1951 // This is set to true if we found a candidate for broadcast at Lane.
1952 bool FoundCandidate = false;
1953 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1954 OperandData &Data = getData(OpI, Ln);
1955 if (Data.APO != OpAPO || Data.IsUsed)
1956 continue;
1957 if (Data.V == Op) {
1958 FoundCandidate = true;
1959 Data.IsUsed = true;
1960 break;
1961 }
1962 }
1963 if (!FoundCandidate)
1964 return false;
1965 }
1966 return true;
1967 }
1968
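// For example, with three lanes whose first operands are {a, a, a},
// shouldBroadcast(a, /*OpIdx=*/0, /*Lane=*/0) returns true, because an
// identical, unused candidate with a matching APO is found in every other
// lane.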
1969 public:
1970 /// Initialize with all the operands of the instruction vector \p RootVL.
1971 VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI,
1972 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R)
1973 : TLI(TLI), DL(DL), SE(SE), R(R) {
1974 // Append all the operands of RootVL.
1975 appendOperandsOfVL(RootVL);
1976 }
1977
1978 /// \Returns a value vector with the operands across all lanes for the
1979 /// opearnd at \p OpIdx.
1980 ValueList getVL(unsigned OpIdx) const {
1981 ValueList OpVL(OpsVec[OpIdx].size());
1982 assert(OpsVec[OpIdx].size() == getNumLanes() &&
1983 "Expected same num of lanes across all operands");
1984 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1985 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1986 return OpVL;
1987 }
1988
1989 // Performs operand reordering for 2 or more operands.
1990 // The original operands are in OrigOps[OpIdx][Lane].
1991 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
1992 void reorder() {
1993 unsigned NumOperands = getNumOperands();
1994 unsigned NumLanes = getNumLanes();
1995 // Each operand has its own mode. We are using this mode to help us select
1996 // the instructions for each lane, so that they match best with the ones
1997 // we have selected so far.
1998 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1999
2000 // This is a greedy single-pass algorithm. We are going over each lane
2001 // once and deciding on the best order right away with no back-tracking.
2002 // However, in order to increase its effectiveness, we start with the lane
2003 // that has operands that can move the least. For example, given the
2004 // following lanes:
2005 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
2006 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
2007 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
2008 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
2009 // we will start at Lane 1, since the operands of the subtraction cannot
2010 // be reordered. Then we will visit the rest of the lanes in a circular
2011 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
2012
2013 // Find the first lane that we will start our search from.
2014 unsigned FirstLane = getBestLaneToStartReordering();
2015
2016 // Initialize the modes.
2017 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2018 Value *OpLane0 = getValue(OpIdx, FirstLane);
2019 // Keep track if we have instructions with all the same opcode on one
2020 // side.
2021 if (isa<LoadInst>(OpLane0))
2022 ReorderingModes[OpIdx] = ReorderingMode::Load;
2023 else if (isa<Instruction>(OpLane0)) {
2024 // Check if OpLane0 should be broadcast.
2025 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
2026 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2027 else
2028 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
2029 }
2030 else if (isa<Constant>(OpLane0))
2031 ReorderingModes[OpIdx] = ReorderingMode::Constant;
2032 else if (isa<Argument>(OpLane0))
2033 // Our best hope is a Splat. It may save some cost in some cases.
2034 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2035 else
2036 // NOTE: This should be unreachable.
2037 ReorderingModes[OpIdx] = ReorderingMode::Failed;
2038 }
2039
2040 // Check that we don't have the same operands. There is no need to reorder
2041 // if the operands are just a perfect or a shuffled diamond match. Do not
2042 // skip reordering for possible broadcasts or a non-power-of-2 number of
2043 // scalars (just for now).
2044 auto &&SkipReordering = [this]() {
2045 SmallPtrSet<Value *, 4> UniqueValues;
2046 ArrayRef<OperandData> Op0 = OpsVec.front();
2047 for (const OperandData &Data : Op0)
2048 UniqueValues.insert(Data.V);
2049 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
2050 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
2051 return !UniqueValues.contains(Data.V);
2052 }))
2053 return false;
2054 }
2055 // TODO: Check if we can remove a check for non-power-2 number of
2056 // scalars after full support of non-power-2 vectorization.
2057 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
2058 };
2059
2060 // If the initial strategy fails for any of the operand indexes, then we
2061 // perform reordering again in a second pass. This helps avoid assigning
2062 // high priority to the failed strategy, and should improve reordering for
2063 // the non-failed operand indexes.
2064 for (int Pass = 0; Pass != 2; ++Pass) {
2065 // Check if there is no need to reorder the operands, since they are a
2066 // perfect or a shuffled diamond match.
2067 // We need to do this to avoid extra external-use cost counting for
2068 // shuffled matches, which may cause regressions.
2069 if (SkipReordering())
2070 break;
2071 // Skip the second pass if the first pass did not fail.
2072 bool StrategyFailed = false;
2073 // Mark all operand data as free to use.
2074 clearUsed();
2075 // We keep the original operand order for the FirstLane, so reorder the
2076 // rest of the lanes. We are visiting the nodes in a circular fashion,
2077 // using FirstLane as the center point and increasing the radius
2078 // distance.
2079 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
2080 for (unsigned I = 0; I < NumOperands; ++I)
2081 MainAltOps[I].push_back(getData(I, FirstLane).V);
2082
2083 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
2084 // Visit the lane on the right and then the lane on the left.
2085 for (int Direction : {+1, -1}) {
2086 int Lane = FirstLane + Direction * Distance;
2087 if (Lane < 0 || Lane >= (int)NumLanes)
2088 continue;
2089 int LastLane = Lane - Direction;
2090 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
2091 "Out of bounds");
2092 // Look for a good match for each operand.
2093 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2094 // Search for the operand that matches SortedOps[OpIdx][Lane-1].
2095 std::optional<unsigned> BestIdx = getBestOperand(
2096 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
2097 // By not selecting a value, we allow the operands that follow to
2098 // select a better matching value. We will get a non-null value in
2099 // the next run of getBestOperand().
2100 if (BestIdx) {
2101 // Swap the current operand with the one returned by
2102 // getBestOperand().
2103 swap(OpIdx, *BestIdx, Lane);
2104 } else {
2105 // We failed to find a best operand, set mode to 'Failed'.
2106 ReorderingModes[OpIdx] = ReorderingMode::Failed;
2107 // Enable the second pass.
2108 StrategyFailed = true;
2109 }
2110 // Try to get the alternate opcode and follow it during analysis.
2111 if (MainAltOps[OpIdx].size() != 2) {
2112 OperandData &AltOp = getData(OpIdx, Lane);
2113 InstructionsState OpS =
2114 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI);
2115 if (OpS.getOpcode() && OpS.isAltShuffle())
2116 MainAltOps[OpIdx].push_back(AltOp.V);
2117 }
2118 }
2119 }
2120 }
2121 // Skip second pass if the strategy did not fail.
2122 if (!StrategyFailed)
2123 break;
2124 }
2125 }
2126
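// To illustrate the traversal above: with NumLanes = 4 and FirstLane = 1,
// Distance 1 visits lanes 2 and 0, and Distance 2 visits lane 3 (its other
// candidate, lane -1, is out of range and skipped), giving the overall
// order 1, 2, 0, 3 from the example at the top of reorder().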
2127#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2128 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
2129 switch (RMode) {
2130 case ReorderingMode::Load:
2131 return "Load";
2132 case ReorderingMode::Opcode:
2133 return "Opcode";
2134 case ReorderingMode::Constant:
2135 return "Constant";
2136 case ReorderingMode::Splat:
2137 return "Splat";
2138 case ReorderingMode::Failed:
2139 return "Failed";
2140 }
2141 llvm_unreachable("Unimplemented Reordering Type")::llvm::llvm_unreachable_internal("Unimplemented Reordering Type"
, "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp", 2141)
;
2142 }
2143
2144 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
2145 raw_ostream &OS) {
2146 return OS << getModeStr(RMode);
2147 }
2148
2149 /// Debug print.
2150 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
2151 printMode(RMode, dbgs());
2152 }
2153
2154 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
2155 return printMode(RMode, OS);
2156 }
2157
2158 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
2159 const unsigned Indent = 2;
2160 unsigned Cnt = 0;
2161 for (const OperandDataVec &OpDataVec : OpsVec) {
2162 OS << "Operand " << Cnt++ << "\n";
2163 for (const OperandData &OpData : OpDataVec) {
2164 OS.indent(Indent) << "{";
2165 if (Value *V = OpData.V)
2166 OS << *V;
2167 else
2168 OS << "null";
2169 OS << ", APO:" << OpData.APO << "}\n";
2170 }
2171 OS << "\n";
2172 }
2173 return OS;
2174 }
2175
2176 /// Debug print.
2177 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
2178#endif
2179 };
2180
2181 /// Evaluate each pair in \p Candidates and return index into \p Candidates
2182 /// for the pair which has the highest score, deemed to have the best chance
2183 /// to form the root of a profitable tree to vectorize. Return std::nullopt
2184 /// if no candidate scored above LookAheadHeuristics::ScoreFail.
2185 /// \param Limit The lower limit of the score considered to be good enough.
2186 std::optional<int>
2187 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
2188 int Limit = LookAheadHeuristics::ScoreFail) {
2189 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2,
2190 RootLookAheadMaxDepth);
2191 int BestScore = Limit;
2192 std::optional<int> Index;
2193 for (int I : seq<int>(0, Candidates.size())) {
2194 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
2195 Candidates[I].second,
2196 /*U1=*/nullptr, /*U2=*/nullptr,
2197 /*Level=*/1, std::nullopt);
2198 if (Score > BestScore) {
2199 BestScore = Score;
2200 Index = I;
2201 }
2202 }
2203 return Index;
2204 }
2205
2206 /// Checks if the instruction is marked for deletion.
2207 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
2208
2209 /// Removes an instruction from its block and eventually deletes it.
2210 /// It's like Instruction::eraseFromParent() except that the actual deletion
2211 /// is delayed until BoUpSLP is destructed.
2212 void eraseInstruction(Instruction *I) {
2213 DeletedInstructions.insert(I);
2214 }
2215
2216 /// Checks if the instruction was already analyzed for being possible
2217 /// reduction root.
2218 bool isAnalyzedReductionRoot(Instruction *I) const {
2219 return AnalyzedReductionsRoots.count(I);
2220 }
2221 /// Register given instruction as already analyzed for being possible
2222 /// reduction root.
2223 void analyzedReductionRoot(Instruction *I) {
2224 AnalyzedReductionsRoots.insert(I);
2225 }
2226 /// Checks if the provided list of reduced values was checked already for
2227 /// vectorization.
2228 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2229 return AnalyzedReductionVals.contains(hash_value(VL));
2230 }
2231 /// Adds the list of reduced values to list of already checked values for the
2232 /// vectorization.
2233 void analyzedReductionVals(ArrayRef<Value *> VL) {
2234 AnalyzedReductionVals.insert(hash_value(VL));
2235 }
2236 /// Clear the list of the analyzed reduction root instructions.
2237 void clearReductionData() {
2238 AnalyzedReductionsRoots.clear();
2239 AnalyzedReductionVals.clear();
2240 }
2241 /// Checks if the given value is gathered in one of the nodes.
2242 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2243 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2244 }
2245
2246 /// Check if the value is vectorized in the tree.
2247 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2248
2249 ~BoUpSLP();
2250
2251private:
2252 /// Check if the operands on the edges \p Edges of \p UserTE allow
2253 /// reordering (i.e. the operands can be reordered because they have only one
2254 /// user and are reorderable).
2255 /// \param ReorderableGathers List of all gather nodes that require reordering
2256 /// (e.g., gather of extractelements or partially vectorizable loads).
2257 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2258 /// reordering, subset of \p NonVectorized.
2259 bool
2260 canReorderOperands(TreeEntry *UserTE,
2261 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2262 ArrayRef<TreeEntry *> ReorderableGathers,
2263 SmallVectorImpl<TreeEntry *> &GatherOps);
2264
2265 /// Checks if the given \p TE is a gather node with clustered reused scalars
2266 /// and reorders it per given \p Mask.
2267 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2268
2269 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2270 /// if any. If it is not vectorized (gather node), returns nullptr.
2271 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2272 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2273 TreeEntry *TE = nullptr;
2274 const auto *It = find_if(VL, [this, &TE](Value *V) {
2275 TE = getTreeEntry(V);
2276 return TE;
2277 });
2278 if (It != VL.end() && TE->isSame(VL))
2279 return TE;
2280 return nullptr;
2281 }
2282
2283 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2284 /// if any. If it is not vectorized (gather node), returns nullptr.
2285 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2286 unsigned OpIdx) const {
2287 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2288 const_cast<TreeEntry *>(UserTE), OpIdx);
2289 }
2290
2291 /// Checks if all users of \p I are the part of the vectorization tree.
2292 bool areAllUsersVectorized(Instruction *I,
2293 ArrayRef<Value *> VectorizedVals) const;
2294
2295 /// Return information about the vector formed for the specified index
2296 /// of a vector of (the same) instruction.
2297 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> VL,
2298 unsigned OpIdx);
2299
2300 /// \returns the cost of the vectorizable entry.
2301 InstructionCost getEntryCost(const TreeEntry *E,
2302 ArrayRef<Value *> VectorizedVals);
2303
2304 /// This is the recursive part of buildTree.
2305 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2306 const EdgeInfo &EI);
2307
2308 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2309 /// be vectorized to use the original vector (or aggregate "bitcast" to a
2310 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2311 /// returns false, setting \p CurrentOrder to either an empty vector or a
2312 /// non-identity permutation that allows reusing the extract instructions.
2313 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2314 SmallVectorImpl<unsigned> &CurrentOrder) const;
2315
2316 /// Vectorize a single entry in the tree.
2317 Value *vectorizeTree(TreeEntry *E);
2318
2319 /// Vectorize a single entry in the tree, the \p Idx-th operand of the entry
2320 /// \p E.
2321 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx);
2322
2323 /// Create a new vector from a list of scalar values. Produces a sequence
2324 /// which exploits values reused across lanes, and arranges the inserts
2325 /// for ease of later optimization.
2326 Value *createBuildVector(const TreeEntry *E);
2327
2328 /// \returns the scalarization cost for this type. Scalarization in this
2329 /// context means the creation of vectors from a group of scalars. If \p
2330 /// NeedToShuffle is true, need to add a cost of reshuffling some of the
2331 /// vector elements.
2332 InstructionCost getGatherCost(FixedVectorType *Ty,
2333 const APInt &ShuffledIndices,
2334 bool NeedToShuffle) const;
2335
2336 /// Returns the instruction in the bundle, which can be used as a base point
2337 /// for scheduling. Usually it is the last instruction in the bundle, except
2338 /// for the case when all operands are external (in this case, it is the first
2339 /// instruction in the list).
2340 Instruction &getLastInstructionInBundle(const TreeEntry *E);
2341
2342 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
2343 /// tree entries.
2344 /// \param TE Tree entry checked for permutation.
2345 /// \param VL List of scalars (a subset of the TE scalars), checked for
2346 /// permutations.
2347 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
2348 /// previous tree entries. \p Mask is filled with the shuffle mask.
2349 std::optional<TargetTransformInfo::ShuffleKind>
2350 isGatherShuffledEntry(const TreeEntry *TE, ArrayRef<Value *> VL,
2351 SmallVectorImpl<int> &Mask,
2352 SmallVectorImpl<const TreeEntry *> &Entries);
2353
2354 /// \returns the scalarization cost for this list of values. Assuming that
2355 /// this subtree gets vectorized, we may need to extract the values from the
2356 /// roots. This method calculates the cost of extracting the values.
2357 InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
2358
2359 /// Set the Builder insert point to one after the last instruction in
2360 /// the bundle
2361 void setInsertPointAfterBundle(const TreeEntry *E);
2362
2363 /// \returns a vector from a collection of scalars in \p VL.
2364 Value *gather(ArrayRef<Value *> VL);
2365
2366 /// \returns whether the VectorizableTree is fully vectorizable and will
2367 /// be beneficial even the tree height is tiny.
2368 bool isFullyVectorizableTinyTree(bool ForReduction) const;
2369
2370 /// Reorder commutative or alt operands to get better probability of
2371 /// generating vectorized code.
2372 static void reorderInputsAccordingToOpcode(
2373 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
2374 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
2375 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R);
2376
2377 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
2378 /// users of \p TE and collects the stores. It returns the map from the store
2379 /// pointers to the collected stores.
2380 DenseMap<Value *, SmallVector<StoreInst *, 4>>
2381 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
2382
2383 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
2384 /// stores in \p StoresVec can form a vector instruction. If so it returns true
2385 /// and populates \p ReorderIndices with the shuffle indices of the the stores
2386 /// when compared to the sorted vector.
2387 bool canFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
2388 OrdersType &ReorderIndices) const;
2389
2390 /// Iterates through the users of \p TE, looking for scalar stores that can be
2391 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
2392 /// their order and builds an order index vector for each store bundle. It
2393 /// returns all these order vectors found.
2394 /// We run this after the tree has formed, otherwise we may come across user
2395 /// instructions that are not yet in the tree.
2396 SmallVector<OrdersType, 1>
2397 findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
2398
2399 struct TreeEntry {
2400 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
2401 TreeEntry(VecTreeTy &Container) : Container(Container) {}
2402
2403 /// \returns true if the scalars in VL are equal to this entry.
2404 bool isSame(ArrayRef<Value *> VL) const {
2405 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
2406 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
2407 return std::equal(VL.begin(), VL.end(), Scalars.begin());
2408 return VL.size() == Mask.size() &&
2409 std::equal(VL.begin(), VL.end(), Mask.begin(),
2410 [Scalars](Value *V, int Idx) {
2411 return (isa<UndefValue>(V) &&
2412 Idx == UndefMaskElem) ||
2413 (Idx != UndefMaskElem && V == Scalars[Idx]);
2414 });
2415 };
2416 if (!ReorderIndices.empty()) {
2417 // TODO: implement matching if the nodes are just reordered, still can
2418 // treat the vector as the same if the list of scalars matches VL
2419 // directly, without reordering.
2420 SmallVector<int> Mask;
2421 inversePermutation(ReorderIndices, Mask);
2422 if (VL.size() == Scalars.size())
2423 return IsSame(Scalars, Mask);
2424 if (VL.size() == ReuseShuffleIndices.size()) {
2425 ::addMask(Mask, ReuseShuffleIndices);
2426 return IsSame(Scalars, Mask);
2427 }
2428 return false;
2429 }
2430 return IsSame(Scalars, ReuseShuffleIndices);
2431 }
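// Editor's note (illustrative sketch with assumed values, not original
// source): inversePermutation builds the mask consumed by IsSame above.
//   ValueList Scalars = {a, b, c, d};       // stored (reordered) scalars
//   SmallVector<unsigned, 4> ReorderIndices = {2, 0, 1, 3};
//   SmallVector<int> Mask;
//   inversePermutation(ReorderIndices, Mask); // Mask == {1, 2, 0, 3}
// IsSame then accepts VL = {b, c, a, d}, because VL[I] == Scalars[Mask[I]]
// holds for every lane I.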
2432
2433 bool isOperandGatherNode(const EdgeInfo &UserEI) const {
2434 return State == TreeEntry::NeedToGather &&
2435 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx &&
2436 UserTreeIndices.front().UserTE == UserEI.UserTE;
2437 }
2438
2439 /// \returns true if the current entry has the same operands as \p TE.
2440 bool hasEqualOperands(const TreeEntry &TE) const {
2441 if (TE.getNumOperands() != getNumOperands())
2442 return false;
2443 SmallBitVector Used(getNumOperands());
2444 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
2445 unsigned PrevCount = Used.count();
2446 for (unsigned K = 0; K < E; ++K) {
2447 if (Used.test(K))
2448 continue;
2449 if (getOperand(K) == TE.getOperand(I)) {
2450 Used.set(K);
2451 break;
2452 }
2453 }
2454 // Check if we actually found the matching operand.
2455 if (PrevCount == Used.count())
2456 return false;
2457 }
2458 return true;
2459 }
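// Editor's note (illustrative, not from the original source): the
// SmallBitVector turns this into an unordered multiset comparison. Operand
// lists {X, Y} and {Y, X} compare equal because each operand of TE greedily
// claims one unused operand of this entry; {X, Y} vs. {X, X} fails because
// the second X finds no unused match.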
2460
2461 /// \return Final vectorization factor for the node. Defined by the total
2462 /// number of vectorized scalars, including those used several times in the
2463 /// entry and counted in the \a ReuseShuffleIndices, if any.
2464 unsigned getVectorFactor() const {
2465 if (!ReuseShuffleIndices.empty())
2466 return ReuseShuffleIndices.size();
2467 return Scalars.size();
2468 }
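// Editor's note (illustrative example): an entry with Scalars = {a, b} and
// ReuseShuffleIndices = {0, 1, 0, 1} has a vector factor of 4: the two
// scalars are shuffled into four vector lanes.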
2469
2470 /// A vector of scalars.
2471 ValueList Scalars;
2472
2473 /// The Scalars are vectorized into this value. It is initialized to Null.
2474 Value *VectorizedValue = nullptr;
2475
2476 /// Do we need to gather this sequence or vectorize it
2477 /// (either with a vector instruction or with scatter/gather
2478 /// intrinsics for store/load)?
2479 enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
2480 EntryState State;
2481
2482 /// Does this sequence require some shuffling?
2483 SmallVector<int, 4> ReuseShuffleIndices;
2484
2485 /// Does this entry require reordering?
2486 SmallVector<unsigned, 4> ReorderIndices;
2487
2488 /// Points back to the VectorizableTree.
2489 ///
2490 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
2491 /// to be a pointer and needs to be able to initialize the child iterator.
2492 /// Thus we need a reference back to the container to translate the indices
2493 /// to entries.
2494 VecTreeTy &Container;
2495
2496 /// The TreeEntry index containing the user of this entry. We can actually
2497 /// have multiple users so the data structure is not truly a tree.
2498 SmallVector<EdgeInfo, 1> UserTreeIndices;
2499
2500 /// The index of this TreeEntry in VectorizableTree.
2501 int Idx = -1;
2502
2503 private:
2504 /// The operands of each instruction in each lane Operands[op_index][lane].
2505 /// Note: This helps avoid the replication of the code that performs the
2506 /// reordering of operands during buildTree_rec() and vectorizeTree().
2507 SmallVector<ValueList, 2> Operands;
2508
2509 /// The main/alternate instruction.
2510 Instruction *MainOp = nullptr;
2511 Instruction *AltOp = nullptr;
2512
2513 public:
2514 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
2515 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
2516 if (Operands.size() < OpIdx + 1)
2517 Operands.resize(OpIdx + 1);
2518 assert(Operands[OpIdx].empty() && "Already resized?");
2519 assert(OpVL.size() <= Scalars.size() &&
2520 "Number of operands is greater than the number of scalars.");
2521 Operands[OpIdx].resize(OpVL.size());
2522 copy(OpVL, Operands[OpIdx].begin());
2523 }
2524
2525 /// Set the operands of this bundle in their original order.
2526 void setOperandsInOrder() {
2527 assert(Operands.empty() && "Already initialized?");
2528 auto *I0 = cast<Instruction>(Scalars[0]);
2529 Operands.resize(I0->getNumOperands());
2530 unsigned NumLanes = Scalars.size();
2531 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
2532 OpIdx != NumOperands; ++OpIdx) {
2533 Operands[OpIdx].resize(NumLanes);
2534 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
2535 auto *I = cast<Instruction>(Scalars[Lane]);
2536 assert(I->getNumOperands() == NumOperands &&
2537 "Expected same number of operands");
2538 Operands[OpIdx][Lane] = I->getOperand(OpIdx);
2539 }
2540 }
2541 }
2542
2543 /// Reorders operands of the node to the given mask \p Mask.
2544 void reorderOperands(ArrayRef<int> Mask) {
2545 for (ValueList &Operand : Operands)
2546 reorderScalars(Operand, Mask);
2547 }
2548
2549 /// \returns the \p OpIdx operand of this TreeEntry.
2550 ValueList &getOperand(unsigned OpIdx) {
2551 assert(OpIdx < Operands.size() && "Off bounds");
2552 return Operands[OpIdx];
2553 }
2554
2555 /// \returns the \p OpIdx operand of this TreeEntry.
2556 ArrayRef<Value *> getOperand(unsigned OpIdx) const {
2557 assert(OpIdx < Operands.size() && "Off bounds");
2558 return Operands[OpIdx];
2559 }
2560
2561 /// \returns the number of operands.
2562 unsigned getNumOperands() const { return Operands.size(); }
2563
2564 /// \return the single \p OpIdx operand.
2565 Value *getSingleOperand(unsigned OpIdx) const {
2566 assert(OpIdx < Operands.size() && "Off bounds");
2567 assert(!Operands[OpIdx].empty() && "No operand available");
2568 return Operands[OpIdx][0];
2569 }
2570
2571 /// Some of the instructions in the list have alternate opcodes.
2572 bool isAltShuffle() const { return MainOp != AltOp; }
2573
2574 bool isOpcodeOrAlt(Instruction *I) const {
2575 unsigned CheckedOpcode = I->getOpcode();
2576 return (getOpcode() == CheckedOpcode ||
2577 getAltOpcode() == CheckedOpcode);
2578 }
2579
2580 /// Chooses the correct key for scheduling data. If \p Op has the same (or
2581 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
2582 /// \p OpValue.
2583 Value *isOneOf(Value *Op) const {
2584 auto *I = dyn_cast<Instruction>(Op);
2585 if (I && isOpcodeOrAlt(I))
2586 return Op;
2587 return MainOp;
2588 }
2589
2590 void setOperations(const InstructionsState &S) {
2591 MainOp = S.MainOp;
2592 AltOp = S.AltOp;
2593 }
2594
2595 Instruction *getMainOp() const {
2596 return MainOp;
2597 }
2598
2599 Instruction *getAltOp() const {
2600 return AltOp;
2601 }
2602
2603 /// The main/alternate opcodes for the list of instructions.
2604 unsigned getOpcode() const {
2605 return MainOp ? MainOp->getOpcode() : 0;
2606 }
2607
2608 unsigned getAltOpcode() const {
2609 return AltOp ? AltOp->getOpcode() : 0;
2610 }
2611
2612 /// When ReuseShuffleIndices is empty it just returns the position of \p V
2613 /// within the vector of Scalars. Otherwise, tries to remap it via its reuse index.
2614 int findLaneForValue(Value *V) const {
2615 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
2616 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2617 if (!ReorderIndices.empty())
2618 FoundLane = ReorderIndices[FoundLane];
2619 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2620 if (!ReuseShuffleIndices.empty()) {
2621 FoundLane = std::distance(ReuseShuffleIndices.begin(),
2622 find(ReuseShuffleIndices, FoundLane));
2623 }
2624 return FoundLane;
2625 }
2626
2627#ifndef NDEBUG
2628 /// Debug printer.
2629 LLVM_DUMP_METHOD void dump() const {
2630 dbgs() << Idx << ".\n";
2631 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
2632 dbgs() << "Operand " << OpI << ":\n";
2633 for (const Value *V : Operands[OpI])
2634 dbgs().indent(2) << *V << "\n";
2635 }
2636 dbgs() << "Scalars: \n";
2637 for (Value *V : Scalars)
2638 dbgs().indent(2) << *V << "\n";
2639 dbgs() << "State: ";
2640 switch (State) {
2641 case Vectorize:
2642 dbgs() << "Vectorize\n";
2643 break;
2644 case ScatterVectorize:
2645 dbgs() << "ScatterVectorize\n";
2646 break;
2647 case NeedToGather:
2648 dbgs() << "NeedToGather\n";
2649 break;
2650 }
2651 dbgs() << "MainOp: ";
2652 if (MainOp)
2653 dbgs() << *MainOp << "\n";
2654 else
2655 dbgs() << "NULL\n";
2656 dbgs() << "AltOp: ";
2657 if (AltOp)
2658 dbgs() << *AltOp << "\n";
2659 else
2660 dbgs() << "NULL\n";
2661 dbgs() << "VectorizedValue: ";
2662 if (VectorizedValue)
2663 dbgs() << *VectorizedValue << "\n";
2664 else
2665 dbgs() << "NULL\n";
2666 dbgs() << "ReuseShuffleIndices: ";
2667 if (ReuseShuffleIndices.empty())
2668 dbgs() << "Empty";
2669 else
2670 for (int ReuseIdx : ReuseShuffleIndices)
2671 dbgs() << ReuseIdx << ", ";
2672 dbgs() << "\n";
2673 dbgs() << "ReorderIndices: ";
2674 for (unsigned ReorderIdx : ReorderIndices)
2675 dbgs() << ReorderIdx << ", ";
2676 dbgs() << "\n";
2677 dbgs() << "UserTreeIndices: ";
2678 for (const auto &EInfo : UserTreeIndices)
2679 dbgs() << EInfo << ", ";
2680 dbgs() << "\n";
2681 }
2682#endif
2683 };
2684
2685#ifndef NDEBUG
2686 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
2687 InstructionCost VecCost,
2688 InstructionCost ScalarCost) const {
2689 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
2690 dbgs() << "SLP: Costs:\n";
2691 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
2692 dbgs() << "SLP: VectorCost = " << VecCost << "\n";
2693 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
2694 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " <<
2695 ReuseShuffleCost + VecCost - ScalarCost << "\n";
2696 }
2697#endif
2698
2699 /// Create a new VectorizableTree entry.
2700 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, std::optional<ScheduleData *> Bundle,
2701 const InstructionsState &S,
2702 const EdgeInfo &UserTreeIdx,
2703 ArrayRef<int> ReuseShuffleIndices = std::nullopt,
2704 ArrayRef<unsigned> ReorderIndices = std::nullopt) {
2705 TreeEntry::EntryState EntryState =
2706 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
2707 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
2708 ReuseShuffleIndices, ReorderIndices);
2709 }
2710
2711 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
2712 TreeEntry::EntryState EntryState,
2713 std::optional<ScheduleData *> Bundle,
2714 const InstructionsState &S,
2715 const EdgeInfo &UserTreeIdx,
2716 ArrayRef<int> ReuseShuffleIndices = std::nullopt,
2717 ArrayRef<unsigned> ReorderIndices = std::nullopt) {
2718 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
2719 (Bundle && EntryState != TreeEntry::NeedToGather)) &&
2720 "Need to vectorize gather entry?");
2721 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
2722 TreeEntry *Last = VectorizableTree.back().get();
2723 Last->Idx = VectorizableTree.size() - 1;
2724 Last->State = EntryState;
2725 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
2726 ReuseShuffleIndices.end());
2727 if (ReorderIndices.empty()) {
2728 Last->Scalars.assign(VL.begin(), VL.end());
2729 Last->setOperations(S);
2730 } else {
2731 // Reorder scalars and build final mask.
2732 Last->Scalars.assign(VL.size(), nullptr);
2733 transform(ReorderIndices, Last->Scalars.begin(),
2734 [VL](unsigned Idx) -> Value * {
2735 if (Idx >= VL.size())
2736 return UndefValue::get(VL.front()->getType());
2737 return VL[Idx];
2738 });
2739 InstructionsState S = getSameOpcode(Last->Scalars, *TLI);
2740 Last->setOperations(S);
2741 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
2742 }
2743 if (Last->State != TreeEntry::NeedToGather) {
2744 for (Value *V : VL) {
2745 assert(!getTreeEntry(V) && "Scalar already in tree!");
2746 ScalarToTreeEntry[V] = Last;
2747 }
2748 // Update the scheduler bundle to point to this TreeEntry.
2749 ScheduleData *BundleMember = *Bundle;
2750 assert((BundleMember || isa<PHINode>(S.MainOp) ||
2751 isVectorLikeInstWithConstOps(S.MainOp) ||
2752 doesNotNeedToSchedule(VL)) &&
2753 "Bundle and VL out of sync");
2754 if (BundleMember) {
2755 for (Value *V : VL) {
2756 if (doesNotNeedToBeScheduled(V))
2757 continue;
2758 assert(BundleMember && "Unexpected end of bundle.");
2759 BundleMember->TE = Last;
2760 BundleMember = BundleMember->NextInBundle;
2761 }
2762 }
2763 assert(!BundleMember && "Bundle and VL out of sync");
2764 } else {
2765 MustGather.insert(VL.begin(), VL.end());
2766 }
2767
2768 if (UserTreeIdx.UserTE)
2769 Last->UserTreeIndices.push_back(UserTreeIdx);
2770
2771 return Last;
2772 }
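// Editor's note (summary of the logic above, not original text): a scalar
// ends up in exactly one of two places -- ScalarToTreeEntry for vectorized
// entries, so that getTreeEntry() can find its node, or MustGather for
// gather entries. The "Scalar already in tree!" assert enforces that no
// scalar is claimed by two vectorized entries.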
2773
2774 /// -- Vectorization State --
2775 /// Holds all of the tree entries.
2776 TreeEntry::VecTreeTy VectorizableTree;
2777
2778#ifndef NDEBUG
2779 /// Debug printer.
2780 LLVM_DUMP_METHOD void dumpVectorizableTree() const {
2781 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
2782 VectorizableTree[Id]->dump();
2783 dbgs() << "\n";
2784 }
2785 }
2786#endif
2787
2788 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
2789
2790 const TreeEntry *getTreeEntry(Value *V) const {
2791 return ScalarToTreeEntry.lookup(V);
2792 }
2793
2794 /// Maps a specific scalar to its tree entry.
2795 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
2796
2797 /// Maps a value to the proposed vectorizable size.
2798 SmallDenseMap<Value *, unsigned> InstrElementSize;
2799
2800 /// A list of scalars that we found that we need to keep as scalars.
2801 ValueSet MustGather;
2802
2803 /// A map between the vectorized entries and the last instructions in the
2804 /// bundles. The bundles are built in use order, not in the def order of the
2805 /// instructions. So, we cannot rely directly on the last instruction in the
2806 /// bundle being the last instruction in program order during the
2807 /// vectorization process, since the basic blocks are affected; we need to
2808 /// pre-gather these instructions beforehand.
2809 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction;
2810
2811 /// This POD struct describes one external user in the vectorized tree.
2812 struct ExternalUser {
2813 ExternalUser(Value *S, llvm::User *U, int L)
2814 : Scalar(S), User(U), Lane(L) {}
2815
2816 // Which scalar in our function.
2817 Value *Scalar;
2818
2819 // Which user that uses the scalar.
2820 llvm::User *User;
2821
2822 // Which lane does the scalar belong to.
2823 int Lane;
2824 };
2825 using UserList = SmallVector<ExternalUser, 16>;
2826
2827 /// Checks if two instructions may access the same memory.
2828 ///
2829 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
2830 /// is invariant in the calling loop.
2831 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
2832 Instruction *Inst2) {
2833 // First check if the result is already in the cache.
2834 AliasCacheKey key = std::make_pair(Inst1, Inst2);
2835 std::optional<bool> &result = AliasCache[key];
2836 if (result) {
2837 return *result;
2838 }
2839 bool aliased = true;
2840 if (Loc1.Ptr && isSimple(Inst1))
2841 aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
2842 // Store the result in the cache.
2843 result = aliased;
2844 return aliased;
2845 }
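// Editor's note (not in the original source): caching on the (Inst1, Inst2)
// pair alone is sound here because Loc1 is always derived from Inst1, so a
// repeated query with the same key would recompute the same BatchAA answer.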
2846
2847 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
2848
2849 /// Cache for alias results.
2850 /// TODO: consider moving this to the AliasAnalysis itself.
2851 DenseMap<AliasCacheKey, std::optional<bool>> AliasCache;
2852
2853 // Cache for pointerMayBeCaptured calls inside AA. This is preserved
2854 // globally through SLP because we don't perform any action which
2855 // invalidates capture results.
2856 BatchAAResults BatchAA;
2857
2858 /// Temporary store for deleted instructions. Instructions will be deleted
2859 /// eventually when the BoUpSLP is destructed. The deferral is required to
2860 /// ensure that there are no incorrect collisions in the AliasCache, which
2861 /// can happen if a new instruction is allocated at the same address as a
2862 /// previously deleted instruction.
2863 DenseSet<Instruction *> DeletedInstructions;
2864
2865 /// Set of instructions already analyzed for reductions.
2866 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
2867
2868 /// Set of hashes for the list of reduction values already being analyzed.
2869 DenseSet<size_t> AnalyzedReductionVals;
2870
2871 /// A list of values that need to be extracted out of the tree.
2872 /// This list holds pairs of (Internal Scalar : External User). External User
2873 /// can be nullptr, which means that this Internal Scalar will be used later,
2874 /// after vectorization.
2875 UserList ExternalUses;
2876
2877 /// Values used only by @llvm.assume calls.
2878 SmallPtrSet<const Value *, 32> EphValues;
2879
2880 /// Holds all of the instructions that we gathered, shuffle instructions and
2881 /// extractelements.
2882 SetVector<Instruction *> GatherShuffleExtractSeq;
2883
2884 /// A list of blocks that we are going to CSE.
2885 SetVector<BasicBlock *> CSEBlocks;
2886
2887 /// Contains all scheduling relevant data for an instruction.
2888 /// A ScheduleData either represents a single instruction or a member of an
2889 /// instruction bundle (= a group of instructions which is combined into a
2890 /// vector instruction).
2891 struct ScheduleData {
2892 // The initial value for the dependency counters. It means that the
2893 // dependencies are not calculated yet.
2894 enum { InvalidDeps = -1 };
2895
2896 ScheduleData() = default;
2897
2898 void init(int BlockSchedulingRegionID, Value *OpVal) {
2899 FirstInBundle = this;
2900 NextInBundle = nullptr;
2901 NextLoadStore = nullptr;
2902 IsScheduled = false;
2903 SchedulingRegionID = BlockSchedulingRegionID;
2904 clearDependencies();
2905 OpValue = OpVal;
2906 TE = nullptr;
2907 }
2908
2909 /// Verify basic self-consistency properties.
2910 void verify() {
2911 if (hasValidDependencies()) {
2912 assert(UnscheduledDeps <= Dependencies && "invariant");
2913 } else {
2914 assert(UnscheduledDeps == Dependencies && "invariant");
2915 }
2916
2917 if (IsScheduled) {
2918 assert(isSchedulingEntity() &&
2919 "unexpected scheduled state");
2920 for (const ScheduleData *BundleMember = this; BundleMember;
2921 BundleMember = BundleMember->NextInBundle) {
2922 assert(BundleMember->hasValidDependencies() &&
2923 BundleMember->UnscheduledDeps == 0 &&
2924 "unexpected scheduled state");
2925 assert((BundleMember == this || !BundleMember->IsScheduled) &&
2926 "only bundle is marked scheduled");
2927 }
2928 }
2929
2930 assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
2931 "all bundle members must be in same basic block");
2932 }
2933
2934 /// Returns true if the dependency information has been calculated.
2935 /// Note that dependency validity can vary between instructions within
2936 /// a single bundle.
2937 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2938
2939 /// Returns true for single instructions and for bundle representatives
2940 /// (= the head of a bundle).
2941 bool isSchedulingEntity() const { return FirstInBundle == this; }
2942
2943 /// Returns true if it represents an instruction bundle and not only a
2944 /// single instruction.
2945 bool isPartOfBundle() const {
2946 return NextInBundle != nullptr || FirstInBundle != this || TE;
2947 }
2948
2949 /// Returns true if it is ready for scheduling, i.e. it has no more
2950 /// unscheduled dependent instructions/bundles.
2951 bool isReady() const {
2952 assert(isSchedulingEntity() &&
2953 "can't consider non-scheduling entity for ready list");
2954 return unscheduledDepsInBundle() == 0 && !IsScheduled;
2955 }
2956
2957 /// Modifies the number of unscheduled dependencies for this instruction,
2958 /// and returns the number of remaining dependencies for the containing
2959 /// bundle.
2960 int incrementUnscheduledDeps(int Incr) {
2961 assert(hasValidDependencies() &&
2962 "increment of unscheduled deps would be meaningless");
2963 UnscheduledDeps += Incr;
2964 return FirstInBundle->unscheduledDepsInBundle();
2965 }
2966
2967 /// Sets the number of unscheduled dependencies to the number of
2968 /// dependencies.
2969 void resetUnscheduledDeps() {
2970 UnscheduledDeps = Dependencies;
2971 }
2972
2973 /// Clears all dependency information.
2974 void clearDependencies() {
2975 Dependencies = InvalidDeps;
2976 resetUnscheduledDeps();
2977 MemoryDependencies.clear();
2978 ControlDependencies.clear();
2979 }
2980
2981 int unscheduledDepsInBundle() const {
2982 assert(isSchedulingEntity() && "only meaningful on the bundle")(static_cast <bool> (isSchedulingEntity() && "only meaningful on the bundle"
) ? void (0) : __assert_fail ("isSchedulingEntity() && \"only meaningful on the bundle\""
, "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp", 2982, __extension__
__PRETTY_FUNCTION__))
;
2983 int Sum = 0;
2984 for (const ScheduleData *BundleMember = this; BundleMember;
2985 BundleMember = BundleMember->NextInBundle) {
2986 if (BundleMember->UnscheduledDeps == InvalidDeps)
2987 return InvalidDeps;
2988 Sum += BundleMember->UnscheduledDeps;
2989 }
2990 return Sum;
2991 }
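// Editor's note (illustrative example): for a two-member bundle whose
// members have UnscheduledDeps of 1 and 2, unscheduledDepsInBundle()
// reports 3; the bundle only satisfies isReady() once calls to
// incrementUnscheduledDeps(-1) have driven the sum to 0.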
2992
2993 void dump(raw_ostream &os) const {
2994 if (!isSchedulingEntity()) {
2995 os << "/ " << *Inst;
2996 } else if (NextInBundle) {
2997 os << '[' << *Inst;
2998 ScheduleData *SD = NextInBundle;
2999 while (SD) {
3000 os << ';' << *SD->Inst;
3001 SD = SD->NextInBundle;
3002 }
3003 os << ']';
3004 } else {
3005 os << *Inst;
3006 }
3007 }
3008
3009 Instruction *Inst = nullptr;
3010
3011 /// Opcode of the current instruction in the schedule data.
3012 Value *OpValue = nullptr;
3013
3014 /// The TreeEntry that this instruction corresponds to.
3015 TreeEntry *TE = nullptr;
3016
3017 /// Points to the head in an instruction bundle (and always to this for
3018 /// single instructions).
3019 ScheduleData *FirstInBundle = nullptr;
3020
3021 /// Singly linked list of all instructions in a bundle. Null if it is a
3022 /// single instruction.
3023 ScheduleData *NextInBundle = nullptr;
3024
3025 /// Singly linked list of all memory instructions (e.g. load, store, call)
3026 /// in the block, up to the end of the scheduling region.
3027 ScheduleData *NextLoadStore = nullptr;
3028
3029 /// The dependent memory instructions.
3030 /// This list is derived on demand in calculateDependencies().
3031 SmallVector<ScheduleData *, 4> MemoryDependencies;
3032
3033 /// List of instructions which this instruction could be control dependent
3034 /// on. Allowing such nodes to be scheduled below this one could introduce
3035 /// a runtime fault which didn't exist in the original program.
3036 /// e.g. this is a load or udiv following a readonly call which infinitely loops.
3037 SmallVector<ScheduleData *, 4> ControlDependencies;
3038
3039 /// This ScheduleData is in the current scheduling region if this matches
3040 /// the current SchedulingRegionID of BlockScheduling.
3041 int SchedulingRegionID = 0;
3042
3043 /// Used for getting a "good" final ordering of instructions.
3044 int SchedulingPriority = 0;
3045
3046 /// The number of dependencies. Consists of the number of users of the
3047 /// instruction plus the number of dependent memory instructions (if any).
3048 /// This value is calculated on demand.
3049 /// If InvalidDeps, the number of dependencies is not calculated yet.
3050 int Dependencies = InvalidDeps;
3051
3052 /// The number of dependencies minus the number of dependencies of scheduled
3053 /// instructions. As soon as this is zero, the instruction/bundle gets ready
3054 /// for scheduling.
3055 /// Note that this is negative as long as Dependencies is not calculated.
3056 int UnscheduledDeps = InvalidDeps;
3057
3058 /// True if this instruction is scheduled (or considered as scheduled in the
3059 /// dry-run).
3060 bool IsScheduled = false;
3061 };
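// Editor's note (illustrative): dump() renders "/ %x" for a non-head bundle
// member, "[%a;%b]" for a bundle head followed by its members, and the bare
// instruction for an unbundled scheduling entity.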
3062
3063#ifndef NDEBUG
3064 friend inline raw_ostream &operator<<(raw_ostream &os,
3065 const BoUpSLP::ScheduleData &SD) {
3066 SD.dump(os);
3067 return os;
3068 }
3069#endif
3070
3071 friend struct GraphTraits<BoUpSLP *>;
3072 friend struct DOTGraphTraits<BoUpSLP *>;
3073
3074 /// Contains all scheduling data for a basic block.
3075 /// It does not schedule instructions that are not memory read/write
3076 /// instructions and whose operands are either constants, or arguments, or
3077 /// phis, or instructions from other blocks, or whose users are phis or are
3078 /// in other blocks. The resulting vector instructions can be placed at the
3079 /// beginning of the basic block without scheduling (if the operands do not
3080 /// need to be scheduled) or at the end of the block (if the users are
3081 /// outside of the block). This saves some compile time and memory used by
3082 /// the compiler.
3083 /// ScheduleData is assigned to each instruction between the boundaries of
3084 /// the tree entry, even to those that are not part of the graph. It is
3085 /// required to correctly follow the dependencies between the instructions
3086 /// and to schedule them correctly. ScheduleData is not allocated for
3087 /// instructions that do not require scheduling, like phis, nodes with only
3088 /// extractelements/insertelements, or nodes whose instructions have
3089 /// uses/operands outside of the block.
3090 struct BlockScheduling {
3091 BlockScheduling(BasicBlock *BB)
3092 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
3093
3094 void clear() {
3095 ReadyInsts.clear();
3096 ScheduleStart = nullptr;
3097 ScheduleEnd = nullptr;
3098 FirstLoadStoreInRegion = nullptr;
3099 LastLoadStoreInRegion = nullptr;
3100 RegionHasStackSave = false;
3101
3102 // Reduce the maximum schedule region size by the size of the
3103 // previous scheduling run.
3104 ScheduleRegionSizeLimit -= ScheduleRegionSize;
3105 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
3106 ScheduleRegionSizeLimit = MinScheduleRegionSize;
3107 ScheduleRegionSize = 0;
3108
3109 // Make a new scheduling region, i.e. all existing ScheduleData is not
3110 // in the new region yet.
3111 ++SchedulingRegionID;
3112 }
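// Editor's note (not in the original source): incrementing
// SchedulingRegionID is an O(1) "clear": stale ScheduleData is filtered out
// lazily by isInSchedulingRegion() comparing recorded IDs, instead of
// eagerly resetting every node of the previous region.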
3113
3114 ScheduleData *getScheduleData(Instruction *I) {
3115 if (BB != I->getParent())
3116 // Avoid lookup if can't possibly be in map.
3117 return nullptr;
3118 ScheduleData *SD = ScheduleDataMap.lookup(I);
3119 if (SD && isInSchedulingRegion(SD))
3120 return SD;
3121 return nullptr;
3122 }
3123
3124 ScheduleData *getScheduleData(Value *V) {
3125 if (auto *I = dyn_cast<Instruction>(V))
3126 return getScheduleData(I);
3127 return nullptr;
3128 }
3129
3130 ScheduleData *getScheduleData(Value *V, Value *Key) {
3131 if (V == Key)
3132 return getScheduleData(V);
3133 auto I = ExtraScheduleDataMap.find(V);
3134 if (I != ExtraScheduleDataMap.end()) {
3135 ScheduleData *SD = I->second.lookup(Key);
3136 if (SD && isInSchedulingRegion(SD))
3137 return SD;
3138 }
3139 return nullptr;
3140 }
3141
3142 bool isInSchedulingRegion(ScheduleData *SD) const {
3143 return SD->SchedulingRegionID == SchedulingRegionID;
3144 }
3145
3146 /// Marks an instruction as scheduled and puts all dependent ready
3147 /// instructions into the ready-list.
3148 template <typename ReadyListType>
3149 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
3150 SD->IsScheduled = true;
3151 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
3153 for (ScheduleData *BundleMember = SD; BundleMember;
3154 BundleMember = BundleMember->NextInBundle) {
3155 if (BundleMember->Inst != BundleMember->OpValue)
3156 continue;
3157
3158 // Handle the def-use chain dependencies.
3159
3160 // Decrement the unscheduled counter and insert to ready list if ready.
3161 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
3162 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
3163 if (OpDef && OpDef->hasValidDependencies() &&
3164 OpDef->incrementUnscheduledDeps(-1) == 0) {
3165 // There are no more unscheduled dependencies after
3166 // decrementing, so we can put the dependent instruction
3167 // into the ready list.
3168 ScheduleData *DepBundle = OpDef->FirstInBundle;
3169 assert(!DepBundle->IsScheduled &&
3170 "already scheduled bundle gets ready");
3171 ReadyList.insert(DepBundle);
3172 LLVM_DEBUG(dbgs()
3173 << "SLP: gets ready (def): " << *DepBundle << "\n");
3174 }
3175 });
3176 };
3177
3178 // If BundleMember is a vector bundle, its operands may have been
3179 // reordered during buildTree(). We therefore need to get its operands
3180 // through the TreeEntry.
3181 if (TreeEntry *TE = BundleMember->TE) {
3182 // Need to search for the lane since the tree entry can be reordered.
3183 int Lane = std::distance(TE->Scalars.begin(),
3184 find(TE->Scalars, BundleMember->Inst));
3185 assert(Lane >= 0 && "Lane not set");
3186
3187 // Since the vectorization tree is built recursively, this assertion
3188 // ensures that the tree entry has all operands set before reaching
3189 // this code. A couple of exceptions known at the moment are extracts,
3190 // where the second (immediate) operand is not added. Since
3191 // immediates do not affect scheduler behavior, this is considered
3192 // okay.
3193 auto *In = BundleMember->Inst;
3194 assert(In &&
3195 (isa<ExtractValueInst, ExtractElementInst>(In) ||
3196 In->getNumOperands() == TE->getNumOperands()) &&
3197 "Missed TreeEntry operands?");
3198 (void)In; // fake use to avoid build failure when assertions disabled
3199
3200 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
3201 OpIdx != NumOperands; ++OpIdx)
3202 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
3203 DecrUnsched(I);
3204 } else {
3205 // If BundleMember is a stand-alone instruction, no operand reordering
3206 // has taken place, so we directly access its operands.
3207 for (Use &U : BundleMember->Inst->operands())
3208 if (auto *I = dyn_cast<Instruction>(U.get()))
3209 DecrUnsched(I);
3210 }
3211 // Handle the memory dependencies.
3212 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
3213 if (MemoryDepSD->hasValidDependencies() &&
3214 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
3215 // There are no more unscheduled dependencies after decrementing,
3216 // so we can put the dependent instruction into the ready list.
3217 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
3218 assert(!DepBundle->IsScheduled &&
3219 "already scheduled bundle gets ready");
3220 ReadyList.insert(DepBundle);
3221 LLVM_DEBUG(dbgs()
3222 << "SLP: gets ready (mem): " << *DepBundle << "\n");
3223 }
3224 }
3225 // Handle the control dependencies.
3226 for (ScheduleData *DepSD : BundleMember->ControlDependencies) {
3227 if (DepSD->incrementUnscheduledDeps(-1) == 0) {
3228 // There are no more unscheduled dependencies after decrementing,
3229 // so we can put the dependent instruction into the ready list.
3230 ScheduleData *DepBundle = DepSD->FirstInBundle;
3231 assert(!DepBundle->IsScheduled &&
3232 "already scheduled bundle gets ready");
3233 ReadyList.insert(DepBundle);
3234 LLVM_DEBUG(dbgs()
3235 << "SLP: gets ready (ctl): " << *DepBundle << "\n");
3236 }
3237 }
3238
3239 }
3240 }
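// Editor's note (illustrative sketch, assuming a SetVector-based ready list
// as used for the dry run; the real driver lives in scheduleBlock()):
//   SetVector<ScheduleData *> Ready;
//   BS.initialFillReadyList(Ready);
//   while (!Ready.empty()) {
//     ScheduleData *Picked = Ready.pop_back_val();
//     BS.schedule(Picked, Ready); // may insert newly-ready bundles
//   }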
3241
3242 /// Verify basic self-consistency properties of the data structure.
3243 void verify() {
3244 if (!ScheduleStart)
3245 return;
3246
3247 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
3248 ScheduleStart->comesBefore(ScheduleEnd) &&
3249 "Not a valid scheduling region?");
3250
3251 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3252 auto *SD = getScheduleData(I);
3253 if (!SD)
3254 continue;
3255 assert(isInSchedulingRegion(SD) &&
3256 "primary schedule data not in window?");
3257 assert(isInSchedulingRegion(SD->FirstInBundle) &&
3258 "entire bundle in window!");
3259 (void)SD;
3260 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); });
3261 }
3262
3263 for (auto *SD : ReadyInsts) {
3264 assert(SD->isSchedulingEntity() && SD->isReady() &&
3265 "item in ready list not ready?");
3266 (void)SD;
3267 }
3268 }
3269
3270 void doForAllOpcodes(Value *V,
3271 function_ref<void(ScheduleData *SD)> Action) {
3272 if (ScheduleData *SD = getScheduleData(V))
3273 Action(SD);
3274 auto I = ExtraScheduleDataMap.find(V);
3275 if (I != ExtraScheduleDataMap.end())
3276 for (auto &P : I->second)
3277 if (isInSchedulingRegion(P.second))
3278 Action(P.second);
3279 }
3280
3281 /// Put all instructions that are ready for scheduling into the ReadyList.
3282 template <typename ReadyListType>
3283 void initialFillReadyList(ReadyListType &ReadyList) {
3284 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3285 doForAllOpcodes(I, [&](ScheduleData *SD) {
3286 if (SD->isSchedulingEntity() && SD->hasValidDependencies() &&
3287 SD->isReady()) {
3288 ReadyList.insert(SD);
3289 LLVM_DEBUG(dbgs()
3290 << "SLP: initially in ready list: " << *SD << "\n");
3291 }
3292 });
3293 }
3294 }
3295
3296 /// Build a bundle from the ScheduleData nodes corresponding to the
3297 /// scalar instruction for each lane.
3298 ScheduleData *buildBundle(ArrayRef<Value *> VL);
3299
3300 /// Checks if a bundle of instructions can be scheduled, i.e. has no
3301 /// cyclic dependencies. This is only a dry run; no instructions are
3302 /// actually moved at this stage.
3303 /// \returns the scheduling bundle. The returned Optional value is not
3304 /// std::nullopt if \p VL is allowed to be scheduled.
3305 std::optional<ScheduleData *>
3306 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
3307 const InstructionsState &S);
3308
3309 /// Un-bundles a group of instructions.
3310 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
3311
3312 /// Allocates schedule data chunk.
3313 ScheduleData *allocateScheduleDataChunks();
3314
3315 /// Extends the scheduling region so that V is inside the region.
3316 /// \returns true if the region size is within the limit.
3317 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
3318
3319 /// Initialize the ScheduleData structures for new instructions in the
3320 /// scheduling region.
3321 void initScheduleData(Instruction *FromI, Instruction *ToI,
3322 ScheduleData *PrevLoadStore,
3323 ScheduleData *NextLoadStore);
3324
3325 /// Updates the dependency information of a bundle and of all instructions/
3326 /// bundles which depend on the original bundle.
3327 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
3328 BoUpSLP *SLP);
3329
3330 /// Sets all instructions in the scheduling region to un-scheduled.
3331 void resetSchedule();
3332
3333 BasicBlock *BB;
3334
3335 /// Simple memory allocation for ScheduleData.
3336 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
3337
3338 /// The size of a ScheduleData array in ScheduleDataChunks.
3339 int ChunkSize;
3340
3341 /// The allocator position in the current chunk, which is the last entry
3342 /// of ScheduleDataChunks.
3343 int ChunkPos;
3344
3345 /// Attaches ScheduleData to Instruction.
3346 /// Note that the mapping survives during all vectorization iterations, i.e.
3347 /// ScheduleData structures are recycled.
3348 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
3349
3350 /// Attaches ScheduleData to Instruction with the leading key.
3351 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
3352 ExtraScheduleDataMap;
3353
3354 /// The ready-list for scheduling (only used for the dry-run).
3355 SetVector<ScheduleData *> ReadyInsts;
3356
3357 /// The first instruction of the scheduling region.
3358 Instruction *ScheduleStart = nullptr;
3359
3360 /// The first instruction _after_ the scheduling region.
3361 Instruction *ScheduleEnd = nullptr;
3362
3363 /// The first memory accessing instruction in the scheduling region
3364 /// (can be null).
3365 ScheduleData *FirstLoadStoreInRegion = nullptr;
3366
3367 /// The last memory accessing instruction in the scheduling region
3368 /// (can be null).
3369 ScheduleData *LastLoadStoreInRegion = nullptr;
3370
3371 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling
3372 /// region? Used to optimize the dependence calculation for the
3373 /// common case where there isn't.
3374 bool RegionHasStackSave = false;
3375
3376 /// The current size of the scheduling region.
3377 int ScheduleRegionSize = 0;
3378
3379 /// The maximum size allowed for the scheduling region.
3380 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
3381
3382 /// The ID of the scheduling region. For a new vectorization iteration this
3383 /// is incremented, which "removes" all ScheduleData from the region.
3384 /// Make sure that the initial SchedulingRegionID is greater than the
3385 /// initial SchedulingRegionID in ScheduleData (which is 0).
3386 int SchedulingRegionID = 1;
3387 };
3388
3389 /// Attaches the BlockScheduling structures to basic blocks.
3390 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
3391
3392 /// Performs the "real" scheduling. Done before vectorization is actually
3393 /// performed in a basic block.
3394 void scheduleBlock(BlockScheduling *BS);
3395
3396 /// List of users to ignore during scheduling and that don't need extracting.
3397 const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
3398
3399 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
3400 /// sorted SmallVectors of unsigned.
3401 struct OrdersTypeDenseMapInfo {
3402 static OrdersType getEmptyKey() {
3403 OrdersType V;
3404 V.push_back(~1U);
3405 return V;
3406 }
3407
3408 static OrdersType getTombstoneKey() {
3409 OrdersType V;
3410 V.push_back(~2U);
3411 return V;
3412 }
3413
3414 static unsigned getHashValue(const OrdersType &V) {
3415 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
3416 }
3417
3418 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
3419 return LHS == RHS;
3420 }
3421 };
3422
3423 // Analysis and block reference.
3424 Function *F;
3425 ScalarEvolution *SE;
3426 TargetTransformInfo *TTI;
3427 TargetLibraryInfo *TLI;
3428 LoopInfo *LI;
3429 DominatorTree *DT;
3430 AssumptionCache *AC;
3431 DemandedBits *DB;
3432 const DataLayout *DL;
3433 OptimizationRemarkEmitter *ORE;
3434
3435 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
3436 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
3437
3438 /// Instruction builder to construct the vectorized tree.
3439 IRBuilder<> Builder;
3440
3441 /// A map of scalar integer values to the smallest bit width with which they
3442 /// can legally be represented. The values map to (width, signed) pairs,
3443 /// where "width" indicates the minimum bit width and "signed" is True if the
3444 /// value must be signed-extended, rather than zero-extended, back to its
3445 /// original width.
3446 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
3447};
3448
3449} // end namespace slpvectorizer
3450
3451template <> struct GraphTraits<BoUpSLP *> {
3452 using TreeEntry = BoUpSLP::TreeEntry;
3453
3454 /// NodeRef has to be a pointer per the GraphWriter.
3455 using NodeRef = TreeEntry *;
3456
3457 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
3458
3459 /// Add the VectorizableTree to the index iterator to be able to return
3460 /// TreeEntry pointers.
3461 struct ChildIteratorType
3462 : public iterator_adaptor_base<
3463 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
3464 ContainerTy &VectorizableTree;
3465
3466 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
3467 ContainerTy &VT)
3468 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
3469
3470 NodeRef operator*() { return I->UserTE; }
3471 };
3472
3473 static NodeRef getEntryNode(BoUpSLP &R) {
3474 return R.VectorizableTree[0].get();
3475 }
3476
3477 static ChildIteratorType child_begin(NodeRef N) {
3478 return {N->UserTreeIndices.begin(), N->Container};
3479 }
3480
3481 static ChildIteratorType child_end(NodeRef N) {
3482 return {N->UserTreeIndices.end(), N->Container};
3483 }
3484
3485 /// For the node iterator we just need to turn the TreeEntry iterator into a
3486 /// TreeEntry* iterator so that it dereferences to NodeRef.
3487 class nodes_iterator {
3488 using ItTy = ContainerTy::iterator;
3489 ItTy It;
3490
3491 public:
3492 nodes_iterator(const ItTy &It2) : It(It2) {}
3493 NodeRef operator*() { return It->get(); }
3494 nodes_iterator operator++() {
3495 ++It;
3496 return *this;
3497 }
3498 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
3499 };
3500
3501 static nodes_iterator nodes_begin(BoUpSLP *R) {
3502 return nodes_iterator(R->VectorizableTree.begin());
3503 }
3504
3505 static nodes_iterator nodes_end(BoUpSLP *R) {
3506 return nodes_iterator(R->VectorizableTree.end());
3507 }
3508
3509 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
3510};
3511
3512template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
3513 using TreeEntry = BoUpSLP::TreeEntry;
3514
3515 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3516
3517 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
3518 std::string Str;
3519 raw_string_ostream OS(Str);
3520 OS << Entry->Idx << ".\n";
3521 if (isSplat(Entry->Scalars))
3522 OS << "<splat> ";
3523 for (auto *V : Entry->Scalars) {
3524 OS << *V;
3525 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
3526 return EU.Scalar == V;
3527 }))
3528 OS << " <extract>";
3529 OS << "\n";
3530 }
3531 return Str;
3532 }
3533
3534 static std::string getNodeAttributes(const TreeEntry *Entry,
3535 const BoUpSLP *) {
3536 if (Entry->State == TreeEntry::NeedToGather)
3537 return "color=red";
3538 if (Entry->State == TreeEntry::ScatterVectorize)
3539 return "color=blue";
3540 return "";
3541 }
3542};
3543
3544} // end namespace llvm
3545
3546BoUpSLP::~BoUpSLP() {
3547 SmallVector<WeakTrackingVH> DeadInsts;
3548 for (auto *I : DeletedInstructions) {
3549 for (Use &U : I->operands()) {
3550 auto *Op = dyn_cast<Instruction>(U.get());
3551 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() &&
3552 wouldInstructionBeTriviallyDead(Op, TLI))
3553 DeadInsts.emplace_back(Op);
3554 }
3555 I->dropAllReferences();
3556 }
3557 for (auto *I : DeletedInstructions) {
3558 assert(I->use_empty() &&(static_cast <bool> (I->use_empty() && "trying to erase instruction with users."
) ? void (0) : __assert_fail ("I->use_empty() && \"trying to erase instruction with users.\""
, "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp", 3559, __extension__
__PRETTY_FUNCTION__))
3559 "trying to erase instruction with users.")(static_cast <bool> (I->use_empty() && "trying to erase instruction with users."
) ? void (0) : __assert_fail ("I->use_empty() && \"trying to erase instruction with users.\""
, "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp", 3559, __extension__
__PRETTY_FUNCTION__))
;
3560 I->eraseFromParent();
3561 }
3562
3563 // Clean up any dead scalar code feeding the vectorized instructions.
3564 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
3565
3566#ifdef EXPENSIVE_CHECKS
3567 // If we could guarantee that this call is not extremely slow, we could
3568 // remove the ifdef limitation (see PR47712).
3569 assert(!verifyFunction(*F, &dbgs()));
3570#endif
3571}
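
The destructor above deletes in two phases: it first drops all operand references from every instruction in DeletedInstructions, and only then erases them, so cross-references between two dead instructions never trip the use_empty() assertion. A minimal standalone sketch of that pattern, with a hypothetical Node type standing in for llvm::Instruction:

#include <cassert>
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Operands; // outgoing references (uses of other nodes)
  int NumUses = 0;              // how many nodes still reference this one

  void dropAllReferences() {
    for (Node *Op : Operands)
      --Op->NumUses;
    Operands.clear();
  }
};

// Phase 1 breaks every edge, phase 2 destroys. A single combined pass would
// fire the assertion whenever two dead nodes reference each other.
void eraseAll(std::set<Node *> &Dead) {
  for (Node *N : Dead)
    N->dropAllReferences();
  for (Node *N : Dead) {
    assert(N->NumUses == 0 && "trying to erase node with users");
    delete N;
  }
  Dead.clear();
}

int main() {
  auto *A = new Node;
  auto *B = new Node;
  A->Operands.push_back(B); ++B->NumUses; // A -> B
  B->Operands.push_back(A); ++A->NumUses; // B -> A, a cycle of dead nodes
  std::set<Node *> Dead{A, B};
  eraseAll(Dead); // succeeds only because all edges are dropped first
}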
3572
3573/// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
3574/// contains the original mask for the scalars reused in the node. The
3575/// procedure transforms this mask in accordance with the given \p Mask.
3576static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
3577 assert(!Mask.empty() && Reuses.size() == Mask.size() &&
3578 "Expected non-empty mask.");
3579 SmallVector<int> Prev(Reuses.begin(), Reuses.end());
3580 Prev.swap(Reuses);
3581 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
3582 if (Mask[I] != UndefMaskElem)
3583 Reuses[Mask[I]] = Prev[I];
3584}
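
Note that the loop above is a scatter, not a gather: the element previously at position I moves to position Mask[I]. A self-contained sketch with plain std::vector standing in for SmallVectorImpl (the values are hypothetical):

#include <cassert>
#include <vector>

constexpr int UndefElem = -1; // stands in for UndefMaskElem

// Same scatter as reorderReuses above: element Prev[I] moves to slot Mask[I].
std::vector<int> reorderReusesDemo(std::vector<int> Reuses,
                                   const std::vector<int> &Mask) {
  std::vector<int> Prev = Reuses;
  for (unsigned I = 0; I < Prev.size(); ++I)
    if (Mask[I] != UndefElem)
      Reuses[Mask[I]] = Prev[I];
  return Reuses;
}

int main() {
  // Reuse mask {2, 0, 1, 3} permuted by mask {1, 2, 3, 0}:
  // slot Mask[0]=1 receives Prev[0]=2, slot Mask[1]=2 receives Prev[1]=0, ...
  std::vector<int> Out = reorderReusesDemo({2, 0, 1, 3}, {1, 2, 3, 0});
  assert((Out == std::vector<int>{3, 2, 0, 1}));
}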
3585
3586/// Reorders the given \p Order according to the given \p Mask. \p Order is
3587/// the original order of the scalars. The procedure transforms the provided order
3588/// in accordance with the given \p Mask. If the resulting \p Order is just an
3589/// identity order, \p Order is cleared.
3590static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3591 assert(!Mask.empty() && "Expected non-empty mask.");
3592 SmallVector<int> MaskOrder;
3593 if (Order.empty()) {
3594 MaskOrder.resize(Mask.size());
3595 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3596 } else {
3597 inversePermutation(Order, MaskOrder);
3598 }
3599 reorderReuses(MaskOrder, Mask);
3600 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
3601 Order.clear();
3602 return;
3603 }
3604 Order.assign(Mask.size(), Mask.size());
3605 for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3606 if (MaskOrder[I] != UndefMaskElem)
3607 Order[MaskOrder[I]] = I;
3608 fixupOrderingIndices(Order);
3609}
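
reorderOrder relies on the duality between an order and its inverse permutation mask. Assuming inversePermutation fills Mask[Order[I]] = I (as this file's helpers do), a small sketch of that duality and of the identity case that gets cleared:

#include <cassert>
#include <numeric>
#include <vector>

// Hypothetical stand-in for inversePermutation: lane I of the reordered
// vector takes scalar Order[I], so the shuffle mask places I at Order[I].
std::vector<int> inversePermutationDemo(const std::vector<unsigned> &Order) {
  std::vector<int> Mask(Order.size(), -1);
  for (unsigned I = 0; I < Order.size(); ++I)
    Mask[Order[I]] = static_cast<int>(I);
  return Mask;
}

int main() {
  std::vector<int> Mask = inversePermutationDemo({2, 0, 1, 3});
  assert((Mask == std::vector<int>{1, 2, 0, 3}));
  // An identity order inverts to an identity mask, which reorderOrder
  // detects via ShuffleVectorInst::isIdentityMask and then clears.
  std::vector<unsigned> Identity(4);
  std::iota(Identity.begin(), Identity.end(), 0u);
  assert((inversePermutationDemo(Identity) == std::vector<int>{0, 1, 2, 3}));
}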
3610
3611std::optional<BoUpSLP::OrdersType>
3612BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3613 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3614 unsigned NumScalars = TE.Scalars.size();
3615 OrdersType CurrentOrder(NumScalars, NumScalars);
3616 SmallVector<int> Positions;
3617 SmallBitVector UsedPositions(NumScalars);
3618 const TreeEntry *STE = nullptr;
3619 // Try to find all gathered scalars that get vectorized in another
3620 // vectorized node. Only a single vectorized tree node may be used to
3621 // correctly identify the order of the gathered scalars.
3622 for (unsigned I = 0; I < NumScalars; ++I) {
3623 Value *V = TE.Scalars[I];
3624 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3625 continue;
3626 if (const auto *LocalSTE = getTreeEntry(V)) {
3627 if (!STE)
3628 STE = LocalSTE;
3629 else if (STE != LocalSTE)
3630 // Take the order only from the single vector node.
3631 return std::nullopt;
3632 unsigned Lane =
3633 std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
3634 if (Lane >= NumScalars)
3635 return std::nullopt;
3636 if (CurrentOrder[Lane] != NumScalars) {
3637 if (Lane != I)
3638 continue;
3639 UsedPositions.reset(CurrentOrder[Lane]);
3640 }
3641 // The partial identity (where only some elements of the gather node are
3642 // in the identity order) is good.
3643 CurrentOrder[Lane] = I;
3644 UsedPositions.set(I);
3645 }
3646 }
3647 // Need to keep the order if we have a vector entry and at least 2 scalars or
3648 // the vectorized entry has just 2 scalars.
3649 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
3650 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
3651 for (unsigned I = 0; I < NumScalars; ++I)
3652 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
3653 return false;
3654 return true;
3655 };
3656 if (IsIdentityOrder(CurrentOrder)) {
3657 CurrentOrder.clear();
3658 return CurrentOrder;
3659 }
3660 auto *It = CurrentOrder.begin();
3661 for (unsigned I = 0; I < NumScalars;) {
3662 if (UsedPositions.test(I)) {
3663 ++I;
3664 continue;
3665 }
3666 if (*It == NumScalars) {
3667 *It = I;
3668 ++I;
3669 }
3670 ++It;
3671 }
3672 return CurrentOrder;
3673 }
3674 return std::nullopt;
3675}
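
The tail of the function completes a partial order: lanes already matched to the vectorized node keep their positions, and the remaining slots are filled with the still-unused positions in ascending order. A standalone sketch of just that fill step, with hypothetical inputs:

#include <cassert>
#include <vector>

// Slots still equal to N are free; they receive the unused positions in
// ascending order, exactly as in the loop at the end of the function above.
std::vector<unsigned> fillUnusedPositions(std::vector<unsigned> Order,
                                          const std::vector<bool> &Used) {
  const unsigned N = Order.size();
  auto It = Order.begin();
  for (unsigned I = 0; I < N;) {
    if (Used[I]) { ++I; continue; } // position I is already taken
    if (*It == N) { *It = I; ++I; } // a free slot gets the next unused position
    ++It;
  }
  return Order;
}

int main() {
  const unsigned N = 4;
  // Positions 1 and 3 were matched to vectorized lanes; 0 and 2 are free.
  std::vector<unsigned> Order{N, 1, N, 3};
  std::vector<bool> Used{false, true, false, true};
  assert((fillUnusedPositions(Order, Used) ==
          std::vector<unsigned>{0, 1, 2, 3}));
}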
3676
3677namespace {
3678/// Tracks the state in which we can represent the loads of the given sequence.
3679enum class LoadsState { Gather, Vectorize, ScatterVectorize };
3680} // anonymous namespace
3681
3682static bool arePointersCompatible(Value *Ptr1, Value *Ptr2,
3683 const TargetLibraryInfo &TLI,
3684 bool CompareOpcodes = true) {
3685 if (getUnderlyingObject(Ptr1) != getUnderlyingObject(Ptr2))
3686 return false;
3687 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
3688 if (!GEP1)
3689 return false;
3690 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
3691 if (!GEP2)
3692 return false;
3693 return GEP1->getNumOperands() == 2 && GEP2->getNumOperands() == 2 &&
3694 ((isConstant(GEP1->getOperand(1)) &&
3695 isConstant(GEP2->getOperand(1))) ||
3696 !CompareOpcodes ||
3697 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI)
3698 .getOpcode());
3699}
3700
3701/// Checks if the given array of loads can be represented as a vectorized
3702/// load, a scatter load, or just a simple gather.
3703static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
3704 const TargetTransformInfo &TTI,
3705 const DataLayout &DL, ScalarEvolution &SE,
3706 LoopInfo &LI, const TargetLibraryInfo &TLI,
3707 SmallVectorImpl<unsigned> &Order,
3708 SmallVectorImpl<Value *> &PointerOps) {
3709 // Check that a vectorized load would load the same memory as a scalar
3710 // load. For example, we don't want to vectorize loads that are smaller
3711 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
3712 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
3713 // from such a struct, we read/write packed bits disagreeing with the
3714 // unvectorized version.
3715 Type *ScalarTy = VL0->getType();
3716
3717 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy))
3718 return LoadsState::Gather;
3719
3720 // Make sure all loads in the bundle are simple - we can't vectorize
3721 // atomic or volatile loads.
3722 PointerOps.clear();
3723 PointerOps.resize(VL.size());
3724 auto *POIter = PointerOps.begin();
3725 for (Value *V : VL) {
3726 auto *L = cast<LoadInst>(V);
3727 if (!L->isSimple())
3728 return LoadsState::Gather;
3729 *POIter = L->getPointerOperand();
3730 ++POIter;
3731 }
3732
3733 Order.clear();
3734 // Check the order of pointer operands or that all pointers are the same.
3735 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order);
3736 if (IsSorted || all_of(PointerOps, [&](Value *P) {
3737 return arePointersCompatible(P, PointerOps.front(), TLI);
3738 })) {
3739 if (IsSorted) {
3740 Value *Ptr0;
3741 Value *PtrN;
3742 if (Order.empty()) {
3743 Ptr0 = PointerOps.front();
3744 PtrN = PointerOps.back();
3745 } else {
3746 Ptr0 = PointerOps[Order.front()];
3747 PtrN = PointerOps[Order.back()];
3748 }
3749 std::optional<int> Diff =
3750 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
3751 // Check that the sorted loads are consecutive.
3752 if (static_cast<unsigned>(*Diff) == VL.size() - 1)
3753 return LoadsState::Vectorize;
3754 }
3755 // TODO: need to improve the analysis of the pointers; if not all of them
3756 // are GEPs or they have > 2 operands, we end up with a gather node, which
3757 // just increases the cost.
3758 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent());
3759 bool ProfitableGatherPointers =
3760 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) {
3761 return L && L->isLoopInvariant(V);
3762 })) <= VL.size() / 2 && VL.size() > 2;
3763 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) {
3764 auto *GEP = dyn_cast<GetElementPtrInst>(P);
3765 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) ||
3766 (GEP && GEP->getNumOperands() == 2);
3767 })) {
3768 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3769 for (Value *V : VL)
3770 CommonAlignment =
3771 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
3772 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3773 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) &&
3774 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment))
3775 return LoadsState::ScatterVectorize;
3776 }
3777 }
3778
3779 return LoadsState::Gather;
3780}
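
The core consecutiveness test reduces to a single pointer difference: after sorting, N loads form one wide load exactly when the element distance between the first and last pointer is N - 1. A sketch of that check over plain byte offsets (a hypothetical stand-in for what getPointersDiff derives via SCEV):

#include <cassert>
#include <optional>
#include <vector>

// Element distance between two pointers, given their byte offsets from a
// common base. Returns nullopt when they are not a whole number of elements
// apart, mirroring the optional result of getPointersDiff.
std::optional<int> pointersDiff(long Off0, long OffN, long ElemSize) {
  long Delta = OffN - Off0;
  if (Delta % ElemSize != 0)
    return std::nullopt;
  return static_cast<int>(Delta / ElemSize);
}

int main() {
  // Four i32 loads at byte offsets 0, 4, 8, 12: diff(first, last) == 3 == N-1.
  std::vector<long> Offs{0, 4, 8, 12};
  std::optional<int> Diff = pointersDiff(Offs.front(), Offs.back(), 4);
  assert(Diff && static_cast<unsigned>(*Diff) == Offs.size() - 1);
  // Offsets 0 .. 16 leave a gap: diff == 4 != 3, so gather instead.
  assert(*pointersDiff(0, 16, 4) != 3);
}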
3781
3782bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
3783 const DataLayout &DL, ScalarEvolution &SE,
3784 SmallVectorImpl<unsigned> &SortedIndices) {
3785 assert(llvm::all_of(
3786 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
3787 "Expected list of pointer operands.");
3788 // Map from bases to a vector of (Ptr, Offset, OrigIdx). We insert each Ptr
3789 // into the vector for its base, sort each vector by offset, and return the
3790 // sorted indices so that values with the same base sit next to one another.
3791 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases;
3792 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U));
3793
3794 unsigned Cnt = 1;
3795 for (Value *Ptr : VL.drop_front()) {
3796 bool Found = any_of(Bases, [&](auto &Base) {
3797 std::optional<int> Diff =
3798 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE,
3799 /*StrictCheck=*/true);
3800 if (!Diff)
3801 return false;
3802
3803 Base.second.emplace_back(Ptr, *Diff, Cnt++);
3804 return true;
3805 });
3806
3807 if (!Found) {
3808 // If we haven't found enough to usefully cluster, return early.
3809 if (Bases.size() > VL.size() / 2 - 1)
3810 return false;
3811
3812 // Not found already - add a new Base
3813 Bases[Ptr].emplace_back(Ptr, 0, Cnt++);
3814 }
3815 }
3816
3817 // For each of the bases, sort the pointers by offset and check if any of
3818 // the bases become consecutive.
3819 bool AnyConsecutive = false;
3820 for (auto &Base : Bases) {
3821 auto &Vec = Base.second;
3822 if (Vec.size() > 1) {
3823 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X,
3824 const std::tuple<Value *, int, unsigned> &Y) {
3825 return std::get<1>(X) < std::get<1>(Y);
3826 });
3827 int InitialOffset = std::get<1>(Vec[0]);
3828 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](auto &P) {
3829 return std::get<1>(P.value()) == int(P.index()) + InitialOffset;
3830 });
3831 }
3832 }
3833
3834 // Fill the SortedIndices array only if it looks worthwhile to sort the ptrs.
3835 SortedIndices.clear();
3836 if (!AnyConsecutive)
3837 return false;
3838
3839 for (auto &Base : Bases) {
3840 for (auto &T : Base.second)
3841 SortedIndices.push_back(std::get<2>(T));
3842 }
3843
3844 assert(SortedIndices.size() == VL.size() &&
3845 "Expected SortedIndices to be the size of VL");
3846 return true;
3847}
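
To illustrate the clustering, here is a standalone sketch that replaces the SCEV-based pointer analysis with explicit (base id, element offset) pairs: pointers are grouped per base, each group is sorted by offset, and a group counts as consecutive when its offsets form an unbroken run. The early-exit heuristic on the number of bases is omitted; this is an intuition aid, not the full routine.

#include <algorithm>
#include <cassert>
#include <map>
#include <utility>
#include <vector>

bool clusterDemo(const std::vector<std::pair<int, int>> &Ptrs,
                 std::vector<unsigned> &SortedIndices) {
  // Group (offset, original index) per base id.
  std::map<int, std::vector<std::pair<int, unsigned>>> Bases;
  for (unsigned I = 0; I < Ptrs.size(); ++I)
    Bases[Ptrs[I].first].push_back({Ptrs[I].second, I});

  bool AnyConsecutive = false;
  for (auto &[Base, Vec] : Bases) {
    std::stable_sort(Vec.begin(), Vec.end());
    bool Consecutive = true;
    for (unsigned I = 0; I < Vec.size(); ++I)
      Consecutive &= Vec[I].first == Vec[0].first + static_cast<int>(I);
    AnyConsecutive |= Consecutive && Vec.size() > 1;
  }
  if (!AnyConsecutive)
    return false;
  // Emit the original indices base by base, offsets ascending.
  for (auto &[Base, Vec] : Bases)
    for (auto &[Off, Idx] : Vec)
      SortedIndices.push_back(Idx);
  return true;
}

int main() {
  // Two bases: A at offsets {1, 0} and B at offsets {5, 4}; both runs are
  // consecutive once sorted, so indices are emitted base by base.
  std::vector<std::pair<int, int>> Ptrs{{0, 1}, {1, 5}, {0, 0}, {1, 4}};
  std::vector<unsigned> Order;
  assert(clusterDemo(Ptrs, Order));
  assert((Order == std::vector<unsigned>{2, 0, 3, 1}));
}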
3848
3849std::optional<BoUpSLP::OrdersType>
3850BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
3851 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3852 Type *ScalarTy = TE.Scalars[0]->getType();
3853
3854 SmallVector<Value *> Ptrs;
3855 Ptrs.reserve(TE.Scalars.size());
3856 for (Value *V : TE.Scalars) {
3857 auto *L = dyn_cast<LoadInst>(V);
3858 if (!L || !L->isSimple())
3859 return std::nullopt;
3860 Ptrs.push_back(L->getPointerOperand());
3861 }
3862
3863 BoUpSLP::OrdersType Order;
3864 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order))
3865 return Order;
3866 return std::nullopt;
3867}
3868
3869/// Check if two insertelement instructions are from the same buildvector.
3870static bool areTwoInsertFromSameBuildVector(
3871 InsertElementInst *VU, InsertElementInst *V,
3872 function_ref<Value *(InsertElementInst *)> GetBaseOperand) {
3873 // Instructions must be from the same basic blocks.
3874 if (VU->getParent() != V->getParent())
3875 return false;
3876 // Checks if 2 insertelements are from the same buildvector.
3877 if (VU->getType() != V->getType())
3878 return false;
3879 // Inserts with multiple uses are separate nodes.
3880 if (!VU->hasOneUse() && !V->hasOneUse())
3881 return false;
3882 auto *IE1 = VU;
3883 auto *IE2 = V;
3884 std::optional<unsigned> Idx1 = getInsertIndex(IE1);
3885 std::optional<unsigned> Idx2 = getInsertIndex(IE2);
3886 if (Idx1 == std::nullopt || Idx2 == std::nullopt)
3887 return false;
3888 // Go through the vector operand of insertelement instructions trying to find
3889 // either VU as the original vector for IE2 or V as the original vector for
3890 // IE1.
3891 do {
3892 if (IE2 == VU)
3893 return VU->hasOneUse();
3894 if (IE1 == V)
3895 return V->hasOneUse();
3896 if (IE1) {
3897 if ((IE1 != VU && !IE1->hasOneUse()) ||
3898 getInsertIndex(IE1).value_or(*Idx2) == *Idx2)
3899 IE1 = nullptr;
3900 else
3901 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1));
3902 }
3903 if (IE2) {
3904 if ((IE2 != V && !IE2->hasOneUse()) ||
3905 getInsertIndex(IE2).value_or(*Idx1) == *Idx1)
3906 IE2 = nullptr;
3907 else
3908 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2));
3909 }
3910 } while (IE1 || IE2);
3911 return false;
3912}
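
Stripped of the one-use and insert-index bookkeeping, the chain walk above reduces to following the vector operand of each insertelement until one chain reaches the other instruction. A deliberately simplified toy model (it ignores hasOneUse and the index checks, so it is an intuition aid, not the full predicate):

#include <cassert>

// An insertelement chain modeled as a linked list through its vector operand.
struct InsertNode {
  InsertNode *Base = nullptr; // previous insert in the buildvector chain
};

// Two inserts belong to the same buildvector if walking one chain upward
// reaches the other instruction (in either direction).
bool sameBuildVector(InsertNode *VU, InsertNode *V) {
  for (InsertNode *N = V; N; N = N->Base)
    if (N == VU)
      return true;
  for (InsertNode *N = VU; N; N = N->Base)
    if (N == V)
      return true;
  return false;
}

int main() {
  InsertNode A, B{&A}, C{&B}; // chain: A <- B <- C
  InsertNode X;               // unrelated chain
  assert(sameBuildVector(&A, &C)); // C's chain reaches A
  assert(!sameBuildVector(&X, &C));
}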
3913
3914std::optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
3915 bool TopToBottom) {
3916 // No need to reorder if we need to shuffle reuses; the node still needs to
3917 // be shuffled.
3918 if (!TE.ReuseShuffleIndices.empty()) {
3919 // Check if reuse shuffle indices can be improved by reordering.
3920 // For this, check that the reuse mask is "clustered", i.e. each scalar
3921 // value is used once in each submask of size <number_of_scalars>.
3922 // Example: 4 scalar values.
3923 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered.
3924 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because
3925 // element 3 is used twice in the second submask.
3926 unsigned Sz = TE.Scalars.size();
3927 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
3928 Sz))
3929 return std::nullopt;
3930 unsigned VF = TE.getVectorFactor();
3931 // Try to build the correct order for extractelement instructions.
3932 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(),
3933 TE.ReuseShuffleIndices.end());
3934 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() &&
3935 all_of(TE.Scalars, [Sz](Value *V) {
3936 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V));
3937 return Idx && *Idx < Sz;
3938 })) {
3939 SmallVector<int> ReorderMask(Sz, UndefMaskElem);
3940 if (TE.ReorderIndices.empty())
3941 std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
3942 else
3943 inversePermutation(TE.ReorderIndices, ReorderMask);
3944 for (unsigned I = 0; I < VF; ++I) {
3945 int &Idx = ReusedMask[I];
3946 if (Idx == UndefMaskElem)
3947 continue;
3948 Value *V = TE.Scalars[ReorderMask[Idx]];
3949 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V));
3950 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI));
3951 }
3952 }
3953 // Build the order of VF size; the reuse shuffles need reordering since
3954 // they are always of VF size.
3955 OrdersType ResOrder(VF);
3956 std::iota(ResOrder.begin(), ResOrder.end(), 0);
3957 auto *It = ResOrder.begin();
3958 for (unsigned K = 0; K < VF; K += Sz) {
3959 OrdersType CurrentOrder(TE.ReorderIndices);
3960 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)};
3961 if (SubMask.front() == UndefMaskElem)
3962 std::iota(SubMask.begin(), SubMask.end(), 0);
3963 reorderOrder(CurrentOrder, SubMask);
3964 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; });
3965 std::advance(It, Sz);
3966 }
3967 if (all_of(enumerate(ResOrder),
3968 [](const auto &Data) { return Data.index() == Data.value(); }))
3969 return {}; // Use identity order.
3970 return ResOrder;
3971 }
3972 if (TE.State == TreeEntry::Vectorize &&
3973 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
3974 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) &&
3975 !TE.isAltShuffle())
3976 return TE.ReorderIndices;
3977 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) {
3978 auto PHICompare = [](llvm::Value *V1, llvm::Value *V2) {
3979 if (!V1->hasOneUse() || !V2->hasOneUse())
3980 return false;
3981 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin());
3982 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin());
3983 if (auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1))
3984 if (auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2)) {
3985 if (!areTwoInsertFromSameBuildVector(
3986 IE1, IE2,
3987 [](InsertElementInst *II) { return II->getOperand(0); }))
3988 return false;
3989 std::optional<unsigned> Idx1 = getInsertIndex(IE1);
3990 std::optional<unsigned> Idx2 = getInsertIndex(IE2);
3991 if (Idx1 == std::nullopt || Idx2 == std::nullopt)
3992 return false;
3993 return *Idx1 < *Idx2;
3994 }
3995 if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1))
3996 if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) {
3997 if (EE1->getOperand(0) != EE2->getOperand(0))
3998 return false;
3999 std::optional<unsigned> Idx1 = getExtractIndex(EE1);
4000 std::optional<unsigned> Idx2 = getExtractIndex(EE2);
4001 if (Idx1 == std::nullopt || Idx2 == std::nullopt)
4002 return false;
4003 return *Idx1 < *Idx2;
4004 }
4005 return false;
4006 };
4007 auto IsIdentityOrder = [](const OrdersType &Order) {
4008 for (unsigned Idx : seq<unsigned>(0, Order.size()))
4009 if (Idx != Order[Idx])
4010 return false;
4011 return true;
4012 };
4013 if (!TE.ReorderIndices.empty())
4014 return TE.ReorderIndices;
4015 DenseMap<Value *, unsigned> PhiToId;
4016 SmallVector<Value *, 4> Phis;
4017 OrdersType ResOrder(TE.Scalars.size());
4018 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id) {
4019 PhiToId[TE.Scalars[Id]] = Id;
4020 Phis.push_back(TE.Scalars[Id]);
4021 }
4022 llvm::stable_sort(Phis, PHICompare);
4023 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id)
4024 ResOrder[Id] = PhiToId[Phis[Id]];
4025 if (IsIdentityOrder(ResOrder))
4026 return {};
4027 return ResOrder;
4028 }
4029 if (TE.State == TreeEntry::NeedToGather) {
4030 // TODO: add analysis of other gather nodes with extractelement
4031 // instructions and other values/instructions, not only undefs.
4032 if (((TE.getOpcode() == Instruction::ExtractElement &&
4033 !TE.isAltShuffle()) ||
4034 (all_of(TE.Scalars,
4035 [](Value *V) {
4036 return isa<UndefValue, ExtractElementInst>(V);
4037 }) &&
4038 any_of(TE.Scalars,
4039 [](Value *V) { return isa<ExtractElementInst>(V); }))) &&
4040 all_of(TE.Scalars,
4041 [](Value *V) {
4042 auto *EE = dyn_cast<ExtractElementInst>(V);
4043 return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
4044 }) &&
4045 allSameType(TE.Scalars)) {
4046 // Check that gather of extractelements can be represented as
4047 // just a shuffle of a single vector.
4048 OrdersType CurrentOrder;
4049 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder);
4050 if (Reuse || !CurrentOrder.empty()) {
4051 if (!CurrentOrder.empty())
4052 fixupOrderingIndices(CurrentOrder);
4053 return CurrentOrder;
4054 }
4055 }
4056 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
4057 return CurrentOrder;
4058 if (TE.Scalars.size() >= 4)
4059 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
4060 return Order;
4061 }
4062 return std::nullopt;
4063}
4064
4065/// Checks if the given mask is a "clustered" mask with the same clusters of
4066/// size \p Sz, which are not identity submasks.
4067static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask,
4068 unsigned Sz) {
4069 ArrayRef<int> FirstCluster = Mask.slice(0, Sz);
4070 if (ShuffleVectorInst::isIdentityMask(FirstCluster))
4071 return false;
4072 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) {
4073 ArrayRef<int> Cluster = Mask.slice(I, Sz);
4074 if (Cluster != FirstCluster)
4075 return false;
4076 }
4077 return true;
4078}
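
A standalone restatement of the predicate with a few worked masks (the values are hypothetical; the mask length is assumed to be a multiple of Sz, as in the caller):

#include <cassert>
#include <vector>

// The mask must consist of identical clusters of size Sz, and the cluster
// must not itself be the identity (an identity cluster needs no rework).
bool repeatedNonIdentityClusters(const std::vector<int> &Mask, unsigned Sz) {
  bool Identity = true;
  for (unsigned I = 0; I < Sz; ++I)
    Identity &= Mask[I] == static_cast<int>(I);
  if (Identity)
    return false;
  for (unsigned I = Sz; I < Mask.size(); I += Sz)
    for (unsigned J = 0; J < Sz; ++J)
      if (Mask[I + J] != Mask[J])
        return false;
  return true;
}

int main() {
  // {3, 2, 0, 1} repeated twice: clustered and non-identity.
  assert(repeatedNonIdentityClusters({3, 2, 0, 1, 3, 2, 0, 1}, 4));
  // Identity cluster: rejected.
  assert(!repeatedNonIdentityClusters({0, 1, 2, 3, 0, 1, 2, 3}, 4));
  // Second cluster differs from the first: rejected.
  assert(!repeatedNonIdentityClusters({3, 2, 0, 1, 3, 3, 0, 1}, 4));
}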
4079
4080void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const {
4081 // Reorder reuses mask.
4082 reorderReuses(TE.ReuseShuffleIndices, Mask);
4083 const unsigned Sz = TE.Scalars.size();
4084 // For vectorized nodes and non-clustered reuses there is no need to do anything else.
4085 if (TE.State != TreeEntry::NeedToGather ||
4086 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
4087 Sz) ||
4088 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz))
4089 return;
4090 SmallVector<int> NewMask;
4091 inversePermutation(TE.ReorderIndices, NewMask);
4092 addMask(NewMask, TE.ReuseShuffleIndices);
4093 // Clear reorder since it is going to be applied to the new mask.
4094 TE.ReorderIndices.clear();
4095 // Try to improve gathered nodes with clustered reuses, if possible.
4096 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz);
4097 SmallVector<unsigned> NewOrder(Slice.begin(), Slice.end());
4098 inversePermutation(NewOrder, NewMask);
4099 reorderScalars(TE.Scalars, NewMask);
4100 // Fill the reuses mask with the identity submasks.
4101 for (auto *It = TE.ReuseShuffleIndices.begin(),
4102 *End = TE.ReuseShuffleIndices.end();
4103 It != End; std::advance(It, Sz))
4104 std::iota(It, std::next(It, Sz), 0);
4105}
4106
4107void BoUpSLP::reorderTopToBottom() {
4108 // Maps VF to the graph nodes.
4109 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
4110 // ExtractElement gather nodes which can be vectorized and need to handle
4111 // their ordering.
4112 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
4113
4114 // Phi nodes can have preferred ordering based on their result users
4115 DenseMap<const TreeEntry *, OrdersType> PhisToOrders;
4116
4117 // AltShuffles can also have a preferred ordering that leads to fewer
4118 // instructions, e.g., the addsub instruction in x86.
4119 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
4120
4121 // Maps a TreeEntry to the reorder indices of external users.
4122 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
4123 ExternalUserReorderMap;
4124 // FIXME: Workaround for syntax error reported by MSVC buildbots.
4125 TargetTransformInfo &TTIRef = *TTI;
4126 // Find all reorderable nodes with the given VF.
4127 // Currently these are vectorized stores, loads, extracts + some gathering
4128 // of extracts.
4129 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries,
4130 &GathersToOrders, &ExternalUserReorderMap,
4131 &AltShufflesToOrders, &PhisToOrders](
4132 const std::unique_ptr<TreeEntry> &TE) {
4133 // Look for external users that will probably be vectorized.
4134 SmallVector<OrdersType, 1> ExternalUserReorderIndices =
4135 findExternalStoreUsersReorderIndices(TE.get());
4136 if (!ExternalUserReorderIndices.empty()) {
4137 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
4138 ExternalUserReorderMap.try_emplace(TE.get(),
4139 std::move(ExternalUserReorderIndices));
4140 }
4141
4142 // Patterns like [fadd,fsub] can be combined into a single instruction in
4143 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
4144 // to take into account their order when looking for the most used order.
4145 if (TE->isAltShuffle()) {
4146 VectorType *VecTy =
4147 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size());
4148 unsigned Opcode0 = TE->getOpcode();
4149 unsigned Opcode1 = TE->getAltOpcode();
4150 // The opcode mask selects between the two opcodes.
4151 SmallBitVector OpcodeMask(TE->Scalars.size(), false);
4152 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size()))
4153 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1)
4154 OpcodeMask.set(Lane);
4155 // If this pattern is supported by the target then we consider the order.
4156 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
4157 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
4158 AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
4159 }
4160 // TODO: Check the reverse order too.
4161 }
4162
4163 if (std::optional<OrdersType> CurrentOrder =
4164 getReorderingData(*TE, /*TopToBottom=*/true)) {
4165 // Do not include ordering for nodes used in the alt opcode
4166 // vectorization; it is better to reorder them during the bottom-to-top
4167 // stage. Following the order here causes reordering of the whole graph,
4168 // though it is actually profitable just to reorder the subgraph that
4169 // starts from the alternate opcode vectorization node. Such nodes
4170 // already end up with a shuffle instruction, and it is enough to change
4171 // this shuffle rather than rotate the scalars for the whole graph.
4172 unsigned Cnt = 0;
4173 const TreeEntry *UserTE = TE.get();
4174 while (UserTE && Cnt < RecursionMaxDepth) {
4175 if (UserTE->UserTreeIndices.size() != 1)
4176 break;
4177 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
4178 return EI.UserTE->State == TreeEntry::Vectorize &&
4179 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
4180 }))
4181 return;
4182 UserTE = UserTE->UserTreeIndices.back().UserTE;
4183 ++Cnt;
4184 }
4185 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
4186 if (TE->State != TreeEntry::Vectorize || !TE->ReuseShuffleIndices.empty())
4187 GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
4188 if (TE->State == TreeEntry::Vectorize &&
4189 TE->getOpcode() == Instruction::PHI)
4190 PhisToOrders.try_emplace(TE.get(), *CurrentOrder);
4191 }
4192 });
4193
4194 // Reorder the graph nodes according to their vectorization factor.
4195 for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1;
4196 VF /= 2) {
4197 auto It = VFToOrderedEntries.find(VF);
4198 if (It == VFToOrderedEntries.end())
4199 continue;
4200 // Try to find the most profitable order. We are just looking for the
4201 // most used order and reorder the scalar elements in the nodes according
4202 // to it.
4203 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
4204 // All operands are reordered and used only in this node - propagate the
4205 // most used order to the user node.
4206 MapVector<OrdersType, unsigned,
4207 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
4208 OrdersUses;
4209 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
4210 for (const TreeEntry *OpTE : OrderedEntries) {
4211 // No need to reorder these nodes; they still need to be extended and
4212 // shuffled, just merge the reordering shuffle and the reuse shuffle.
4213 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
4214 continue;
4215 // Count the number of uses of each order.
4216 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders,
4217 &PhisToOrders]() -> const OrdersType & {
4218 if (OpTE->State == TreeEntry::NeedToGather ||
4219 !OpTE->ReuseShuffleIndices.empty()) {
4220 auto It = GathersToOrders.find(OpTE);
4221 if (It != GathersToOrders.end())
4222 return It->second;
4223 }
4224 if (OpTE->isAltShuffle()) {
4225 auto It = AltShufflesToOrders.find(OpTE);
4226 if (It != AltShufflesToOrders.end())
4227 return It->second;
4228 }
4229 if (OpTE->State == TreeEntry::Vectorize &&
4230 OpTE->getOpcode() == Instruction::PHI) {
4231 auto It = PhisToOrders.find(OpTE);
4232 if (It != PhisToOrders.end())
4233 return It->second;
4234 }
4235 return OpTE->ReorderIndices;
4236 }();
4237 // First consider the order of the external scalar users.
4238 auto It = ExternalUserReorderMap.find(OpTE);
4239 if (It != ExternalUserReorderMap.end()) {
4240 const auto &ExternalUserReorderIndices = It->second;
4241 // If the OpTE vector factor != number of scalars, use the natural
4242 // order; this is an attempt to reorder a node with reused scalars
4243 // but with external uses.
4244 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) {
4245 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second +=
4246 ExternalUserReorderIndices.size();
4247 } else {
4248 for (const OrdersType &ExtOrder : ExternalUserReorderIndices)
4249 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second;
4250 }
4251 // No other useful reorder data in this entry.
4252 if (Order.empty())
4253 continue;
4254 }
4255 // Stores actually store the mask, not the order; we need to invert it.
4256 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
4257 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
4258 SmallVector<int> Mask;
4259 inversePermutation(Order, Mask);
4260 unsigned E = Order.size();
4261 OrdersType CurrentOrder(E, E);
4262 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
4263 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
4264 });
4265 fixupOrderingIndices(CurrentOrder);
4266 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
4267 } else {
4268 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
4269 }
4270 }
4271 // Set order of the user node.
4272 if (OrdersUses.empty())
4273 continue;
4274 // Choose the most used order.
4275 ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
4276 unsigned Cnt = OrdersUses.front().second;
4277 for (const auto &Pair : drop_begin(OrdersUses)) {
4278 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
4279 BestOrder = Pair.first;
4280 Cnt = Pair.second;
4281 }
4282 }
4283 // Set order of the user node.
4284 if (BestOrder.empty())
4285 continue;
4286 SmallVector<int> Mask;
4287 inversePermutation(BestOrder, Mask);
4288 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
4289 unsigned E = BestOrder.size();
4290 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
4291 return I < E ? static_cast<int>(I) : UndefMaskElem;
4292 });
4293 // Do an actual reordering, if profitable.
4294 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
4295 // Just do the reordering for the nodes with the given VF.
4296 if (TE->Scalars.size() != VF) {
4297 if (TE->ReuseShuffleIndices.size() == VF) {
4298 // Need to reorder the reuses masks of the operands with smaller VF to
4299 // be able to find the match between the graph nodes and scalar
4300 // operands of the given node during vectorization/cost estimation.
4301 assert(all_of(TE->UserTreeIndices,
4302 [VF, &TE](const EdgeInfo &EI) {
4303 return EI.UserTE->Scalars.size() == VF ||
4304 EI.UserTE->Scalars.size() ==
4305 TE->Scalars.size();
4306 }) &&
4307 "All users must be of VF size.");
4308 // Update the ordering of the operands with a smaller VF than the
4309 // given one.
4310 reorderNodeWithReuses(*TE, Mask);
4311 }
4312 continue;
4313 }
4314 if (TE->State == TreeEntry::Vectorize &&
4315 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
4316 InsertElementInst>(TE->getMainOp()) &&
4317 !TE->isAltShuffle()) {
4318 // Build correct orders for extract{element,value}, loads and
4319 // stores.
4320 reorderOrder(TE->ReorderIndices, Mask);
4321 if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
4322 TE->reorderOperands(Mask);
4323 } else {
4324 // Reorder the node and its operands.
4325 TE->reorderOperands(Mask);
4326 assert(TE->ReorderIndices.empty() &&
4327 "Expected empty reorder sequence.");
4328 reorderScalars(TE->Scalars, Mask);
4329 }
4330 if (!TE->ReuseShuffleIndices.empty()) {
4331 // Apply reversed order to keep the original ordering of the reused
4332 // elements to avoid extra reorder indices shuffling.
4333 OrdersType CurrentOrder;
4334 reorderOrder(CurrentOrder, MaskOrder);
4335 SmallVector<int> NewReuses;
4336 inversePermutation(CurrentOrder, NewReuses);
4337 addMask(NewReuses, TE->ReuseShuffleIndices);
4338 TE->ReuseShuffleIndices.swap(NewReuses);
4339 }
4340 }
4341 }
4342}
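
The order selection in both reordering passes is a plurality vote with a tie-break toward the empty (identity) order. A self-contained sketch of that vote (the container and names are hypothetical simplifications of OrdersUses):

#include <cassert>
#include <utility>
#include <vector>

using OrderTy = std::vector<unsigned>;

// Mirrors the BestOrder/Cnt scan above: a candidate wins with a strictly
// higher use count, or on a tie if it is the empty (identity) order.
OrderTy pickBestOrder(
    const std::vector<std::pair<OrderTy, unsigned>> &OrdersUses) {
  OrderTy Best = OrdersUses.front().first;
  unsigned Cnt = OrdersUses.front().second;
  for (unsigned I = 1; I < OrdersUses.size(); ++I) {
    const auto &[Order, Uses] = OrdersUses[I];
    if (Cnt < Uses || (Cnt == Uses && Order.empty())) {
      Best = Order;
      Cnt = Uses;
    }
  }
  return Best;
}

int main() {
  // {1, 0, 3, 2} used twice beats {2, 3, 0, 1} used once.
  std::vector<std::pair<OrderTy, unsigned>> Uses{
      {{2, 3, 0, 1}, 1}, {{1, 0, 3, 2}, 2}};
  assert((pickBestOrder(Uses) == OrderTy{1, 0, 3, 2}));
  // On a tie, the empty (identity) order wins.
  std::vector<std::pair<OrderTy, unsigned>> Tie{{{1, 0}, 2}, {{}, 2}};
  assert(pickBestOrder(Tie).empty());
}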
4343
4344bool BoUpSLP::canReorderOperands(
4345 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
4346 ArrayRef<TreeEntry *> ReorderableGathers,
4347 SmallVectorImpl<TreeEntry *> &GatherOps) {
4348 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
4349 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
4350 return OpData.first == I &&
4351 OpData.second->State == TreeEntry::Vectorize;
4352 }))
4353 continue;
4354 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
4355 // Do not reorder if operand node is used by many user nodes.
4356 if (any_of(TE->UserTreeIndices,
4357 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
4358 return false;
4359 // Add the node to the list of the ordered nodes with the identity
4360 // order.
4361 Edges.emplace_back(I, TE);
4362 // Add ScatterVectorize nodes to the list of operands, where just
4363 // reordering of the scalars is required. Similar to the gathers, so
4364 // simply add to the list of gathered ops.
4365 // If there are reused scalars, process this node as a regular vectorize
4366 // node, just reorder reuses mask.
4367 if (TE->State != TreeEntry::Vectorize && TE->ReuseShuffleIndices.empty())
4368 GatherOps.push_back(TE);
4369 continue;
4370 }
4371 TreeEntry *Gather = nullptr;
4372 if (count_if(ReorderableGathers,
4373 [&Gather, UserTE, I](TreeEntry *TE) {
4374 assert(TE->State != TreeEntry::Vectorize &&
4375 "Only non-vectorized nodes are expected.");
4376 if (any_of(TE->UserTreeIndices,
4377 [UserTE, I](const EdgeInfo &EI) {
4378 return EI.UserTE == UserTE && EI.EdgeIdx == I;
4379 })) {
4380 assert(TE->isSame(UserTE->getOperand(I)) &&
4381 "Operand entry does not match operands.");
4382 Gather = TE;
4383 return true;
4384 }
4385 return false;
4386 }) > 1 &&
4387 !all_of(UserTE->getOperand(I), isConstant))
4388 return false;
4389 if (Gather)
4390 GatherOps.push_back(Gather);
4391 }
4392 return true;
4393}
4394
4395void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
4396 SetVector<TreeEntry *> OrderedEntries;
4397 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
4398 // Find all reorderable leaf nodes with the given VF.
4399 // Currently these are vectorized loads, extracts without alternate
4400 // operands + some gathering of extracts.
4401 SmallVector<TreeEntry *> NonVectorized;
4402 for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
4403 &NonVectorized](
4404 const std::unique_ptr<TreeEntry> &TE) {
4405 if (TE->State != TreeEntry::Vectorize)
4406 NonVectorized.push_back(TE.get());
4407 if (std::optional<OrdersType> CurrentOrder =
4408 getReorderingData(*TE, /*TopToBottom=*/false)) {
4409 OrderedEntries.insert(TE.get());
4410 if (TE->State != TreeEntry::Vectorize || !TE->ReuseShuffleIndices.empty())
4411 GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
4412 }
4413 });
4414
4415 // 1. Propagate order to the graph nodes, which use only reordered nodes.
4416 // I.e., if the node has operands that are reordered, try to keep at least
4417 // one operand in the natural order and reorder the others + reorder the
4418 // user node itself.
4419 SmallPtrSet<const TreeEntry *, 4> Visited;
4420 while (!OrderedEntries.empty()) {
4421 // 1. Filter out only reordered nodes.
4422 // 2. If the entry has multiple uses - skip it and jump to the next node.
4423 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
4424 SmallVector<TreeEntry *> Filtered;
4425 for (TreeEntry *TE : OrderedEntries) {
4426 if (!(TE->State == TreeEntry::Vectorize ||
4427 (TE->State == TreeEntry::NeedToGather &&
4428 GathersToOrders.count(TE))) ||
4429 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
4430 !all_of(drop_begin(TE->UserTreeIndices),
4431 [TE](const EdgeInfo &EI) {
4432 return EI.UserTE == TE->UserTreeIndices.front().UserTE;
4433 }) ||
4434 !Visited.insert(TE).second) {
4435 Filtered.push_back(TE);
4436 continue;
4437 }
4438 // Build a map between user nodes and their operand order to speed up
4439 // the search. The graph currently does not provide this directly.
4440 for (EdgeInfo &EI : TE->UserTreeIndices) {
4441 TreeEntry *UserTE = EI.UserTE;
4442 auto It = Users.find(UserTE);
4443 if (It == Users.end())
4444 It = Users.insert({UserTE, {}}).first;
4445 It->second.emplace_back(EI.EdgeIdx, TE);
4446 }
4447 }
4448 // Erase filtered entries.
4449 for_each(Filtered,
4450 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); });
4451 SmallVector<
4452 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
4453 UsersVec(Users.begin(), Users.end());
4454 sort(UsersVec, [](const auto &Data1, const auto &Data2) {
4455 return Data1.first->Idx > Data2.first->Idx;
4456 });
4457 for (auto &Data : UsersVec) {
4458 // Check that operands are used only in the User node.
4459 SmallVector<TreeEntry *> GatherOps;
4460 if (!canReorderOperands(Data.first, Data.second, NonVectorized,
4461 GatherOps)) {
4462 for_each(Data.second,
4463 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4464 OrderedEntries.remove(Op.second);
4465 });
4466 continue;
4467 }
4468 // All operands are reordered and used only in this node - propagate the
4469 // most used order to the user node.
4470 MapVector<OrdersType, unsigned,
4471 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
4472 OrdersUses;
4473 // Do the analysis for each tree entry only once; otherwise the order
4474 // of the same node may be considered several times, though it might
4475 // not be profitable.
4476 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
4477 SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
4478 for (const auto &Op : Data.second) {
4479 TreeEntry *OpTE = Op.second;
4480 if (!VisitedOps.insert(OpTE).second)
4481 continue;
4482 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
4483 continue;
4484 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
4485 if (OpTE->State == TreeEntry::NeedToGather ||
4486 !OpTE->ReuseShuffleIndices.empty())
4487 return GathersToOrders.find(OpTE)->second;
4488 return OpTE->ReorderIndices;
4489 }();
4490 unsigned NumOps = count_if(
4491 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
4492 return P.second == OpTE;
4493 });
4494 // Stores actually store the mask, not the order; we need to invert it.
4495 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
4496 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
4497 SmallVector<int> Mask;
4498 inversePermutation(Order, Mask);
4499 unsigned E = Order.size();
4500 OrdersType CurrentOrder(E, E);
4501 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
4502 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
4503 });
4504 fixupOrderingIndices(CurrentOrder);
4505 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
4506 NumOps;
4507 } else {
4508 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
4509 }
4510 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
4511 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders](
4512 const TreeEntry *TE) {
4513 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
4514 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
4515 (IgnoreReorder && TE->Idx == 0))
4516 return true;
4517 if (TE->State == TreeEntry::NeedToGather) {
4518 auto It = GathersToOrders.find(TE);
4519 if (It != GathersToOrders.end())
4520 return !It->second.empty();
4521 return true;
4522 }
4523 return false;
4524 };
4525 for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
4526 TreeEntry *UserTE = EI.UserTE;
4527 if (!VisitedUsers.insert(UserTE).second)
4528 continue;
4529 // May reorder the user node if it requires reordering, has reused
4530 // scalars, is an alternate op vectorize node, or its operand nodes
4531 // require reordering.
4532 if (AllowsReordering(UserTE))
4533 continue;
4534 // Check if users allow reordering.
4535 // Currently look up just 1 level of operands to avoid an increase
4536 // in compile time.
4537 // It is profitable to reorder only if clearly more operands allow
4538 // reordering than prefer the natural order.
4539 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE];
4540 if (static_cast<unsigned>(count_if(
4541 Ops, [UserTE, &AllowsReordering](
4542 const std::pair<unsigned, TreeEntry *> &Op) {
4543 return AllowsReordering(Op.second) &&
4544 all_of(Op.second->UserTreeIndices,
4545 [UserTE](const EdgeInfo &EI) {
4546 return EI.UserTE == UserTE;
4547 });
4548 })) <= Ops.size() / 2)
4549 ++Res.first->second;
4550 }
4551 }
4552 // If there are no orders, skip the current nodes and jump to the next ones, if any.
4553 if (OrdersUses.empty()) {
4554 for_each(Data.second,
4555 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4556 OrderedEntries.remove(Op.second);
4557 });
4558 continue;
4559 }
4560 // Choose the best order.
4561 ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
4562 unsigned Cnt = OrdersUses.front().second;
4563 for (const auto &Pair : drop_begin(OrdersUses)) {
4564 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
4565 BestOrder = Pair.first;
4566 Cnt = Pair.second;
4567 }
4568 }
4569 // Set order of the user node (reordering of operands and user nodes).
4570 if (BestOrder.empty()) {
4571 for_each(Data.second,
4572 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4573 OrderedEntries.remove(Op.second);
4574 });
4575 continue;
4576 }
4577 // Erase operands from OrderedEntries list and adjust their orders.
4578 VisitedOps.clear();
4579 SmallVector<int> Mask;
4580 inversePermutation(BestOrder, Mask);
4581 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
4582 unsigned E = BestOrder.size();
4583 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
4584 return I < E ? static_cast<int>(I) : UndefMaskElem;
4585 });
4586 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
4587 TreeEntry *TE = Op.second;
4588 OrderedEntries.remove(TE);
4589 if (!VisitedOps.insert(TE).second)
4590 continue;
4591 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
4592 reorderNodeWithReuses(*TE, Mask);
4593 continue;
4594 }
4595 // Gathers are processed separately.
4596 if (TE->State != TreeEntry::Vectorize)
4597 continue;
4598 assert((BestOrder.size() == TE->ReorderIndices.size() ||
4599 TE->ReorderIndices.empty()) &&
4600 "Non-matching sizes of user/operand entries.");
4601 reorderOrder(TE->ReorderIndices, Mask);
4602 if (IgnoreReorder && TE == VectorizableTree.front().get())
4603 IgnoreReorder = false;
4604 }
4605 // For gathers we just need to reorder their scalars.
4606 for (TreeEntry *Gather : GatherOps) {
4607 assert(Gather->ReorderIndices.empty() &&
4608 "Unexpected reordering of gathers.");
4609 if (!Gather->ReuseShuffleIndices.empty()) {
4610 // Just reorder reuses indices.
4611 reorderReuses(Gather->ReuseShuffleIndices, Mask);
4612 continue;
4613 }
4614 reorderScalars(Gather->Scalars, Mask);
4615 OrderedEntries.remove(Gather);
4616 }
4617 // Reorder operands of the user node and set the ordering for the user
4618 // node itself.
4619 if (Data.first->State != TreeEntry::Vectorize ||
4620 !isa<ExtractElementInst, ExtractValueInst, LoadInst>(
4621 Data.first->getMainOp()) ||
4622 Data.first->isAltShuffle())
4623 Data.first->reorderOperands(Mask);
4624 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
4625 Data.first->isAltShuffle()) {
4626 reorderScalars(Data.first->Scalars, Mask);
4627 reorderOrder(Data.first->ReorderIndices, MaskOrder);
4628 if (Data.first->ReuseShuffleIndices.empty() &&
4629 !Data.first->ReorderIndices.empty() &&
4630 !Data.first->isAltShuffle()) {
4631 // Insert the user node into the list to try to sink reordering deeper
4632 // into the graph.
4633 OrderedEntries.insert(Data.first);
4634 }
4635 } else {
4636 reorderOrder(Data.first->ReorderIndices, Mask);
4637 }
4638 }
4639 }
4640 // If the reordering is unnecessary, just remove the reorder.
4641 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
4642 VectorizableTree.front()->ReuseShuffleIndices.empty())
4643 VectorizableTree.front()->ReorderIndices.clear();
4644}
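
The BestOrder / Mask pair above is easiest to see on a concrete permutation. Below is a minimal standalone sketch of what inversePermutation() computes; the helper name is invented and only the contract visible in this listing is assumed, it is not part of SLPVectorizer.cpp.

#include <cassert>
#include <vector>

// Sketch: build a shuffle mask such that input element I lands in
// output lane Order[I] (UndefMaskElem is modeled as -1).
static std::vector<int>
inversePermutationSketch(const std::vector<unsigned> &Order) {
  std::vector<int> Mask(Order.size(), -1);
  for (unsigned I = 0, E = Order.size(); I < E; ++I)
    Mask[Order[I]] = I;
  return Mask;
}

int main() {
  // BestOrder {2, 0, 1, 3}: element 0 goes to lane 2, element 1 to
  // lane 0, element 2 to lane 1, element 3 stays in lane 3.
  std::vector<unsigned> BestOrder = {2, 0, 1, 3};
  std::vector<int> Mask = inversePermutationSketch(BestOrder);
  assert((Mask == std::vector<int>{1, 2, 0, 3}));
  return 0;
}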
4645
4646void BoUpSLP::buildExternalUses(
4647 const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
4648 // Collect the values that we need to extract from the tree.
4649 for (auto &TEPtr : VectorizableTree) {
4650 TreeEntry *Entry = TEPtr.get();
4651
4652 // No need to handle users of gathered values.
4653 if (Entry->State == TreeEntry::NeedToGather)
4654 continue;
4655
4656 // For each lane:
4657 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
4658 Value *Scalar = Entry->Scalars[Lane];
4659 int FoundLane = Entry->findLaneForValue(Scalar);
4660
4661 // Check if the scalar is externally used as an extra arg.
4662 auto ExtI = ExternallyUsedValues.find(Scalar);
4663 if (ExtI != ExternallyUsedValues.end()) {
4664 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
4665 << Lane << " from " << *Scalar << ".\n");
4666 ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
4667 }
4668 for (User *U : Scalar->users()) {
4669 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
4670
4671 Instruction *UserInst = dyn_cast<Instruction>(U);
4672 if (!UserInst)
4673 continue;
4674
4675 if (isDeleted(UserInst))
4676 continue;
4677
4678 // Skip in-tree scalars that become vectors
4679 if (TreeEntry *UseEntry = getTreeEntry(U)) {
4680 Value *UseScalar = UseEntry->Scalars[0];
4681 // Some in-tree scalars will remain as scalar in vectorized
4682 // instructions. If that is the case, the one in Lane 0 will
4683 // be used.
4684 if (UseScalar != U ||
4685 UseEntry->State == TreeEntry::ScatterVectorize ||
4686 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
4687 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
4688 << ".\n");
4689 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
4690 continue;
4691 }
4692 }
4693
4694 // Ignore users in the user ignore list.
4695 if (UserIgnoreList && UserIgnoreList->contains(UserInst))
4696 continue;
4697
4698 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
4699 << Lane << " from " << *Scalar << ".\n");
4700 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
4701 }
4702 }
4703 }
4704}
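
In essence, the loop above records, for every scalar of a vectorized entry, each user that stays outside the tree and will therefore need an extract from the vector. A self-contained toy sketch of that filtering follows; plain integers stand in for Value*, and none of the names below are LLVM API.

#include <cstdio>
#include <set>
#include <utility>
#include <vector>

int main() {
  // Scalars 0..3 form one vectorized bundle; value 10 is also in the
  // tree, value 11 is not.
  std::set<int> InTree = {0, 1, 2, 3, 10};
  // (scalar, user) edges taken from the toy use lists.
  std::vector<std::pair<int, int>> UseEdges = {{2, 10}, {3, 11}};

  struct ExternalUse { int Scalar, User, Lane; };
  std::vector<ExternalUse> ExternalUses;
  for (auto [Scalar, User] : UseEdges) {
    if (InTree.count(User))
      continue; // In-tree users are fed directly by the vector.
    ExternalUses.push_back({Scalar, User, /*Lane=*/Scalar});
  }
  for (const ExternalUse &EU : ExternalUses)
    std::printf("extract scalar %d (lane %d) for user %d\n",
                EU.Scalar, EU.Lane, EU.User);
  return 0;
}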
4705
4706DenseMap<Value *, SmallVector<StoreInst *, 4>>
4707BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
4708 DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap;
4709 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
4710 Value *V = TE->Scalars[Lane];
4711 // To save compilation time we don't visit values that have too many users.
4712 static constexpr unsigned UsersLimit = 4;
4713 if (V->hasNUsesOrMore(UsersLimit))
4714 break;
4715
4716 // Collect stores per pointer object.
4717 for (User *U : V->users()) {
4718 auto *SI = dyn_cast<StoreInst>(U);
4719 if (SI == nullptr || !SI->isSimple() ||
4720 !isValidElementType(SI->getValueOperand()->getType()))
4721 continue;
4722 // Skip entry if it is already part of the tree.
4723 if (getTreeEntry(U))
4724 continue;
4725
4726 Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
4727 auto &StoresVec = PtrToStoresMap[Ptr];
4728 // For now just keep one store per pointer object per lane.
4729 // TODO: Extend this to support multiple stores per pointer per lane
4730 if (StoresVec.size() > Lane)
4731 continue;
4732 // Skip if in different BBs.
4733 if (!StoresVec.empty() &&
4734 SI->getParent() != StoresVec.back()->getParent())
4735 continue;
4736 // Make sure that the stores are of the same type.
4737 if (!StoresVec.empty() &&
4738 SI->getValueOperand()->getType() !=
4739 StoresVec.back()->getValueOperand()->getType())
4740 continue;
4741 StoresVec.push_back(SI);
4742 }
4743 }
4744 return PtrToStoresMap;
4745}
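
A toy version of the per-pointer grouping performed above; the Store type is invented, and the real code additionally filters by store simplicity, value type, basic block, and the one-store-per-lane limit.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Store { std::string Base; int Offset; }; // Stand-in for StoreInst.

int main() {
  std::vector<Store> Stores = {{"A", 1}, {"B", 0}, {"A", 0}, {"A", 2}};
  // Group candidate stores by the underlying object of their pointer.
  std::map<std::string, std::vector<Store>> PtrToStores;
  for (const Store &S : Stores)
    PtrToStores[S.Base].push_back(S);
  for (const auto &[Base, Vec] : PtrToStores)
    std::printf("base %s: %zu store(s)\n", Base.c_str(), Vec.size());
  return 0;
}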
4746
4747bool BoUpSLP::canFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
4748 OrdersType &ReorderIndices) const {
4749 // We check whether the stores in StoresVec can form a vector by sorting them
4750 // and checking whether they are consecutive.
4751
4752 // To avoid calling getPointersDiff() while sorting we create a vector of
4753 // pairs {store, offset from first} and sort this instead.
4754 SmallVector<std::pair<StoreInst *, int>, 4> StoreOffsetVec(StoresVec.size());
4755 StoreInst *S0 = StoresVec[0];
4756 StoreOffsetVec[0] = {S0, 0};
4757 Type *S0Ty = S0->getValueOperand()->getType();
4758 Value *S0Ptr = S0->getPointerOperand();
4759 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
4760 StoreInst *SI = StoresVec[Idx];
4761 std::optional<int> Diff =
4762 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
4763 SI->getPointerOperand(), *DL, *SE,
4764 /*StrictCheck=*/true);
4765 // We failed to compare the pointers so just abandon this StoresVec.
4766 if (!Diff)
4767 return false;
4768 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff};
4769 }
4770
4771 // Sort the vector based on the pointers. We create a copy because we may
4772 // need the original later for calculating the reorder (shuffle) indices.
4773 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1,
4774 const std::pair<StoreInst *, int> &Pair2) {
4775 int Offset1 = Pair1.second;
4776 int Offset2 = Pair2.second;
4777 return Offset1 < Offset2;
4778 });
4779
4780 // Check if the stores are consecutive by checking if their difference is 1.
4781 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size()))
4782 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx-1].second + 1)
4783 return false;
4784
4785 // Calculate the shuffle indices according to their offset against the sorted
4786 // StoreOffsetVec.
4787 ReorderIndices.reserve(StoresVec.size());
4788 for (StoreInst *SI : StoresVec) {
4789 unsigned Idx = find_if(StoreOffsetVec,
4790 [SI](const std::pair<StoreInst *, int> &Pair) {
4791 return Pair.first == SI;
4792 }) -
4793 StoreOffsetVec.begin();
4794 ReorderIndices.push_back(Idx);
4795 }
4796 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in
4797 // reorderTopToBottom() and reorderBottomToTop(), so we are following the
4798 // same convention here.
4799 auto IsIdentityOrder = [](const OrdersType &Order) {
4800 for (unsigned Idx : seq<unsigned>(0, Order.size()))
4801 if (Idx != Order[Idx])
4802 return false;
4803 return true;
4804 };
4805 if (IsIdentityOrder(ReorderIndices))
4806 ReorderIndices.clear();
4807
4808 return true;
4809}
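
A worked example of the scheme above as a standalone sketch: stores whose offsets from the first store are {2, 0, 3, 1} are consecutive once sorted, and each store's reorder index is its position in the sorted sequence. Only the offset arithmetic shown in this listing is assumed.

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Offset of each store relative to the first one, in program order.
  std::vector<int> Offsets = {2, 0, 3, 1};
  std::vector<int> Sorted = Offsets;
  std::sort(Sorted.begin(), Sorted.end());

  // Consecutive iff every sorted offset is the previous one plus 1.
  for (unsigned I = 1; I < Sorted.size(); ++I)
    assert(Sorted[I] == Sorted[I - 1] + 1);

  // ReorderIndices[I] = position of store I in the sorted order.
  std::vector<unsigned> ReorderIndices;
  for (int Off : Offsets)
    ReorderIndices.push_back(
        std::lower_bound(Sorted.begin(), Sorted.end(), Off) - Sorted.begin());
  assert((ReorderIndices == std::vector<unsigned>{2, 0, 3, 1}));
  return 0;
}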
4810
4811#ifndef NDEBUG
4812 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) {
4813 for (unsigned Idx : Order)
4814 dbgs() << Idx << ", ";
4815 dbgs() << "\n";
4816}
4817#endif
4818
4819SmallVector<BoUpSLP::OrdersType, 1>
4820BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const {
4821 unsigned NumLanes = TE->Scalars.size();
4822
4823 DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap =
4824 collectUserStores(TE);
4825
4826 // Holds the reorder indices for each candidate store vector that is a user of
4827 // the current TreeEntry.
4828 SmallVector<OrdersType, 1> ExternalReorderIndices;
4829
4830 // Now inspect the stores collected per pointer and look for vectorization
4831 // candidates. For each candidate calculate the reorder index vector and push
4832 // it into `ExternalReorderIndices`.
4833 for (const auto &Pair : PtrToStoresMap) {
4834 auto &StoresVec = Pair.second;
4835 // If the number of stores is not exactly NumLanes, we can't form a vector.
4836 if (StoresVec.size() != NumLanes)
4837 continue;
4838
4839 // If the stores are not consecutive then abandon this StoresVec.
4840 OrdersType ReorderIndices;
4841 if (!canFormVector(StoresVec, ReorderIndices))
4842 continue;
4843
4844 // We now know that the scalars in StoresVec can form a vector instruction,
4845 // so set the reorder indices.
4846 ExternalReorderIndices.push_back(ReorderIndices);
4847 }
4848 return ExternalReorderIndices;
4849}
4850
4851void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
4852 const SmallDenseSet<Value *> &UserIgnoreLst) {
4853 deleteTree();
4854 UserIgnoreList = &UserIgnoreLst;
4855 if (!allSameType(Roots))
4856 return;
4857 buildTree_rec(Roots, 0, EdgeInfo());
4858}
4859
4860void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
4861 deleteTree();
4862 if (!allSameType(Roots))
4863 return;
4864 buildTree_rec(Roots, 0, EdgeInfo());
4865}
4866
4867/// \return true if the specified list of values has only one instruction that
4868/// requires scheduling, false otherwise.
4869#ifndef NDEBUG
4870static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) {
4871 Value *NeedsScheduling = nullptr;
4872 for (Value *V : VL) {
4873 if (doesNotNeedToBeScheduled(V))
4874 continue;
4875 if (!NeedsScheduling) {
4876 NeedsScheduling = V;
4877 continue;
4878 }
4879 return false;
4880 }
4881 return NeedsScheduling;
4882}
4883#endif
4884
4885/// Generates key/subkey pair for the given value to provide effective sorting
4886/// of the values and better detection of vectorizable value sequences. The
4887/// keys/subkeys can be used for better sorting of the values themselves (keys)
4888/// and within value subgroups (subkeys).
4889static std::pair<size_t, size_t> generateKeySubkey(
4890 Value *V, const TargetLibraryInfo *TLI,
4891 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator,
4892 bool AllowAlternate) {
4893 hash_code Key = hash_value(V->getValueID() + 2);
4894 hash_code SubKey = hash_value(0);
4895 // Sort the loads by the distance between the pointers.
4896 if (auto *LI = dyn_cast<LoadInst>(V)) {
4897 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key);
4898 if (LI->isSimple())
4899 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI));
4900 else
4901 Key = SubKey = hash_value(LI);
4902 } else if (isVectorLikeInstWithConstOps(V)) {
4903 // Sort extracts by the vector operands.
4904 if (isa<ExtractElementInst, UndefValue>(V))
4905 Key = hash_value(Value::UndefValueVal + 1);
4906 if (auto *EI = dyn_cast<ExtractElementInst>(V)) {
4907 if (!isUndefVector(EI->getVectorOperand()).all() &&
4908 !isa<UndefValue>(EI->getIndexOperand()))
4909 SubKey = hash_value(EI->getVectorOperand());
4910 }
4911 } else if (auto *I = dyn_cast<Instruction>(V)) {
4912 // Sort other instructions just by the opcodes except for CMPInst.
4913 // For CMP also sort by the predicate kind.
4914 if ((isa<BinaryOperator, CastInst>(I)) &&
4915 isValidForAlternation(I->getOpcode())) {
4916 if (AllowAlternate)
4917 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0);
4918 else
4919 Key = hash_combine(hash_value(I->getOpcode()), Key);
4920 SubKey = hash_combine(
4921 hash_value(I->getOpcode()), hash_value(I->getType()),
4922 hash_value(isa<BinaryOperator>(I)
4923 ? I->getType()
4924 : cast<CastInst>(I)->getOperand(0)->getType()));
4925 // For casts, look through the only operand to improve compile time.
4926 if (isa<CastInst>(I)) {
4927 std::pair<size_t, size_t> OpVals =
4928 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator,
4929 /*AllowAlternate=*/true);
4930 Key = hash_combine(OpVals.first, Key);
4931 SubKey = hash_combine(OpVals.first, SubKey);
4932 }
4933 } else if (auto *CI = dyn_cast<CmpInst>(I)) {
4934 CmpInst::Predicate Pred = CI->getPredicate();
4935 if (CI->isCommutative())
4936 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred));
4937 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred);
4938 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred),
4939 hash_value(SwapPred),
4940 hash_value(CI->getOperand(0)->getType()));
4941 } else if (auto *Call = dyn_cast<CallInst>(I)) {
4942 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI);
4943 if (isTriviallyVectorizable(ID)) {
4944 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID));
4945 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) {
4946 SubKey = hash_combine(hash_value(I->getOpcode()),
4947 hash_value(Call->getCalledFunction()));
4948 } else {
4949 Key = hash_combine(hash_value(Call), Key);
4950 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call));
4951 }
4952 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos())
4953 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End),
4954 hash_value(Op.Tag), SubKey);
4955 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
4956 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1)))
4957 SubKey = hash_value(Gep->getPointerOperand());
4958 else
4959 SubKey = hash_value(Gep);
4960 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) &&
4961 !isa<ConstantInt>(I->getOperand(1))) {
4962 // Do not try to vectorize instructions with potentially high cost.
4963 SubKey = hash_value(I);
4964 } else {
4965 SubKey = hash_value(I->getOpcode());
4966 }
4967 Key = hash_combine(hash_value(I->getParent()), Key);
4968 }
4969 return std::make_pair(Key, SubKey);
4970}
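
The key/subkey pair acts as a two-level grouping criterion: values are first bucketed by key and then split by subkey within each bucket. A minimal sketch of that consumption pattern follows; the classifier input is invented, not the real hash ingredients.

#include <cstddef>
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

int main() {
  // (key, subkey) pairs as generateKeySubkey() might classify values:
  // same key = candidates for one group, same subkey = same subgroup.
  std::vector<std::pair<std::size_t, std::size_t>> Classified = {
      {7, 1}, {7, 1}, {7, 2}, {9, 4}};
  std::map<std::size_t, std::map<std::size_t, unsigned>> Buckets;
  for (auto [Key, SubKey] : Classified)
    ++Buckets[Key][SubKey]; // Two-level grouping.
  for (const auto &[Key, Subs] : Buckets)
    for (const auto &[SubKey, N] : Subs)
      std::printf("key %zu / subkey %zu: %u value(s)\n", Key, SubKey, N);
  return 0;
}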
4971
4972/// Checks if the specified instruction \p I is an alternate operation for
4973/// the given \p MainOp and \p AltOp instructions.
4974static bool isAlternateInstruction(const Instruction *I,
4975 const Instruction *MainOp,
4976 const Instruction *AltOp,
4977 const TargetLibraryInfo &TLI);
4978
4979void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
4980 const EdgeInfo &UserTreeIdx) {
4981 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
4982
4983 SmallVector<int> ReuseShuffleIndicies;
4984 SmallVector<Value *> UniqueValues;
4985 auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues,
4986 &UserTreeIdx,
4987 this](const InstructionsState &S) {
4988 // Check that every instruction appears once in this bundle.
4989 DenseMap<Value *, unsigned> UniquePositions(VL.size());
4990 for (Value *V : VL) {
4991 if (isConstant(V)) {
4992 ReuseShuffleIndicies.emplace_back(
4993 isa<UndefValue>(V) ? UndefMaskElem : UniqueValues.size());
4994 UniqueValues.emplace_back(V);
4995 continue;
4996 }
4997 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
4998 ReuseShuffleIndicies.emplace_back(Res.first->second);
4999 if (Res.second)
5000 UniqueValues.emplace_back(V);
5001 }
5002 size_t NumUniqueScalarValues = UniqueValues.size();
5003 if (NumUniqueScalarValues == VL.size()) {
5004 ReuseShuffleIndicies.clear();
5005 } else {
5006 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
5007 if (NumUniqueScalarValues <= 1 ||
5008 (UniquePositions.size() == 1 && all_of(UniqueValues,
5009 [](Value *V) {
5010 return isa<UndefValue>(V) ||
5011 !isConstant(V);
5012 })) ||
5013 !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
5014 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
5015 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5016 return false;
5017 }
5018 VL = UniqueValues;
5019 }
5020 return true;
5021 };
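  // Illustrative note (not LLVM source): for VL = {a, b, a, c} the lambda
  // above yields UniqueValues = {a, b, c} and ReuseShuffleIndicies =
  // {0, 1, 0, 2}; since 3 unique scalars are not a power of 2, such a
  // bundle is gathered rather than vectorized.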
5022
5023 InstructionsState S = getSameOpcode(VL, *TLI);
5024
5025 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of
5026 // a load), in which case peek through to include it in the tree, without
5027 // ballooning over-budget.
5028 if (Depth >= RecursionMaxDepth &&
5029 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp &&
5030 VL.size() >= 4 &&
5031 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) {
5032 return match(I,
5033 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) &&
5034 cast<Instruction>(I)->getOpcode() ==
5035 cast<Instruction>(S.MainOp)->getOpcode();
5036 })))) {
5037 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
5038 if (TryToFindDuplicates(S))
5039 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5040 ReuseShuffleIndicies);
5041 return;
5042 }
5043
5044 // Don't handle scalable vectors
5045 if (S.getOpcode() == Instruction::ExtractElement &&
5046 isa<ScalableVectorType>(
5047 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
5048 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
5049 if (TryToFindDuplicates(S))
5050 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5051 ReuseShuffleIndicies);
5052 return;
5053 }
5054
5055 // Don't handle vectors.
5056 if (S.OpValue->getType()->isVectorTy() &&
5057 !isa<InsertElementInst>(S.OpValue)) {
5058 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
5059 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5060 return;
5061 }
5062
5063 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
5064 if (SI->getValueOperand()->getType()->isVectorTy()) {
5065 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
5066 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5067 return;
5068 }
5069
5070 // If all of the operands are identical or constant, we have a simple solution.
5071 // If we deal with insert/extract instructions, they all must have constant
5072 // indices, otherwise we should gather them, not try to vectorize.
5073 // If this is an alternate op node with 2 elements and gathered operands,
5074 // do not vectorize.
5075 auto &&NotProfitableForVectorization = [&S, this,
5076 Depth](ArrayRef<Value *> VL) {
5077 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2)
5078 return false;
5079 if (VectorizableTree.size() < MinTreeSize)
5080 return false;
5081 if (Depth >= RecursionMaxDepth - 1)
5082 return true;
5083 // Check if all operands are extracts, part of vector node or can build a
5084 // regular vectorize node.
5085 SmallVector<unsigned, 2> InstsCount(VL.size(), 0);
5086 for (Value *V : VL) {
5087 auto *I = cast<Instruction>(V);
5088 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) {
5089 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op);
5090 }));
5091 }
5092 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp);
5093 if ((IsCommutative &&
5094 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) ||
5095 (!IsCommutative &&
5096 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; })))
5097 return true;
5098 assert(VL.size() == 2 && "Expected only 2 alternate op instructions.");
5099 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
5100 auto *I1 = cast<Instruction>(VL.front());
5101 auto *I2 = cast<Instruction>(VL.back());
5102 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
5103 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
5104 I2->getOperand(Op));
5105 if (static_cast<unsigned>(count_if(
5106 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
5107 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
5108 })) >= S.MainOp->getNumOperands() / 2)
5109 return false;
5110 if (S.MainOp->getNumOperands() > 2)
5111 return true;
5112 if (IsCommutative) {
5113 // Check permuted operands.
5114 Candidates.clear();
5115 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
5116 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
5117 I2->getOperand((Op + 1) % E));
5118 if (any_of(
5119 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
5120 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
5121 }))
5122 return false;
5123 }
5124 return true;
5125 };
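  // Illustrative note (not LLVM source): for an alternate-op pair such as
  // VL = {a + b, c - d}, the lambda above pairs the operands {a, c} and
  // {b, d} (and, for commutative ops, the swapped pairing) and keeps the
  // node only when enough pairs score as vectorizable via findBestRootPair().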
5126 SmallVector<unsigned> SortedIndices;
5127 BasicBlock *BB = nullptr;
5128 bool IsScatterVectorizeUserTE =
5129 UserTreeIdx.UserTE &&
5130 UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize;
5131 bool AreAllSameInsts =
5132 (S.getOpcode() && allSameBlock(VL)) ||
5133 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE &&
5134 VL.size() > 2 &&
5135 all_of(VL,
5136 [&BB](Value *V) {
5137 auto *I = dyn_cast<GetElementPtrInst>(V);
5138 if (!I)
5139 return doesNotNeedToBeScheduled(V);
5140 if (!BB)
5141 BB = I->getParent();
5142 return BB == I->getParent() && I->getNumOperands() == 2;
5143 }) &&
5144 BB &&
5145 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE,
5146 SortedIndices));
5147 if (!AreAllSameInsts || allConstant(VL) || isSplat(VL) ||
5148 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(
5149 S.OpValue) &&
5150 !all_of(VL, isVectorLikeInstWithConstOps)) ||
5151 NotProfitableForVectorization(VL)) {
5152 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
5153 if (TryToFindDuplicates(S))
5154 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5155 ReuseShuffleIndicies);
5156 return;
5157 }
5158
5159 // We now know that this is a vector of instructions of the same type from
5160 // the same block.
5161
5162 // Don't vectorize ephemeral values.
5163 if (!EphValues.empty()) {
5164 for (Value *V : VL) {
5165 if (EphValues.count(V)) {
5166 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
5167 << ") is ephemeral.\n");
5168 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5169 return;
5170 }
5171 }
5172 }
5173
5174 // Check if this is a duplicate of another entry.
5175 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
5176 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
5177 if (!E->isSame(VL)) {
5178 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
5179 if (TryToFindDuplicates(S))
5180 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5181 ReuseShuffleIndicies);
5182 return;
5183 }
5184 // Record the reuse of the tree node. FIXME, currently this is only used to
5185 // properly draw the graph rather than for the actual vectorization.
5186 E->UserTreeIndices.push_back(UserTreeIdx);
5187 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
5188 << ".\n");
5189 return;
5190 }
5191
5192 // Check that none of the instructions in the bundle are already in the tree.
5193 for (Value *V : VL) {
5194 if (!IsScatterVectorizeUserTE && !isa<Instruction>(V))
5195 continue;
5196 if (getTreeEntry(V)) {
5197 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
5198 << ") is already in tree.\n");
5199 if (TryToFindDuplicates(S))
5200 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5201 ReuseShuffleIndicies);
5202 return;
5203 }
5204 }
5205
5206 // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
5207 if (UserIgnoreList && !UserIgnoreList->empty()) {
5208 for (Value *V : VL) {
5209 if (UserIgnoreList && UserIgnoreList->contains(V)) {
5210 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
5211 if (TryToFindDuplicates(S))
5212 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5213 ReuseShuffleIndicies);
5214 return;
5215 }
5216 }
5217 }
5218
5219 // Special processing for sorted pointers for ScatterVectorize node with
5220 // constant indices only.
5221 if (AreAllSameInsts && UserTreeIdx.UserTE &&
5222 UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize &&
5223 !(S.getOpcode() && allSameBlock(VL))) {
5224 assert(S.OpValue->getType()->isPointerTy() &&
5225 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >=
5226 2 &&
5227 "Expected pointers only.");
5228 // Reset S to make it GetElementPtr kind of node.
5229 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
5230 assert(It != VL.end() && "Expected at least one GEP.");
5231 S = getSameOpcode(*It, *TLI);
5232 }
5233
5234 // Check that all of the users of the scalars that we want to vectorize are
5235 // schedulable.
5236 auto *VL0 = cast<Instruction>(S.OpValue);
5237 BB = VL0->getParent();
5238
5239 if (!DT->isReachableFromEntry(BB)) {
5240 // Don't go into unreachable blocks. They may contain instructions with
5241 // dependency cycles which confuse the final scheduling.
5242 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
5243 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5244 return;
5245 }
5246
5247 // Don't go into catchswitch blocks, which can happen with PHIs.
5248 // Such blocks can only have PHIs and the catchswitch. There is no
5249 // place to insert a shuffle if we need to, so just avoid that issue.
5250 if (isa<CatchSwitchInst>(BB->getTerminator())) {
5251 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n");
5252 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5253 return;
5254 }
5255
5256 // Check that every instruction appears once in this bundle.
5257 if (!TryToFindDuplicates(S))
5258 return;
5259
5260 auto &BSRef = BlocksSchedules[BB];
5261 if (!BSRef)
5262 BSRef = std::make_unique<BlockScheduling>(BB);
5263
5264 BlockScheduling &BS = *BSRef;
5265
5266 std::optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
5267#ifdef EXPENSIVE_CHECKS
5268 // Make sure we didn't break any internal invariants
5269 BS.verify();
5270#endif
5271 if (!Bundle) {
5272 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
5273 assert((!BS.getScheduleData(VL0) ||
5274 !BS.getScheduleData(VL0)->isPartOfBundle()) &&
5275 "tryScheduleBundle should cancelScheduling on failure");
5276 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5277 ReuseShuffleIndicies);
5278 return;
5279 }
5280 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
5281
5282 unsigned ShuffleOrOp = S.isAltShuffle() ?
5283 (unsigned) Instruction::ShuffleVector : S.getOpcode();
5284 switch (ShuffleOrOp) {
5285 case Instruction::PHI: {
5286 auto *PH = cast<PHINode>(VL0);
5287
5288 // Check for terminator values (e.g. invoke).
5289 for (Value *V : VL)
5290 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) {
5291 Instruction *Term = dyn_cast<Instruction>(Incoming);
5292 if (Term && Term->isTerminator()) {
5293 LLVM_DEBUG(dbgs()
5294 << "SLP: Need to swizzle PHINodes (terminator use).\n");
5295 BS.cancelScheduling(VL, VL0);
5296 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5297 ReuseShuffleIndicies);
5298 return;
5299 }
5300 }
5301
5302 TreeEntry *TE =
5303 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
5304 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
5305
5306 // Keeps the reordered operands to avoid code duplication.
5307 SmallVector<ValueList, 2> OperandsVec;
5308 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
5309 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
5310 ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
5311 TE->setOperand(I, Operands);
5312 OperandsVec.push_back(Operands);
5313 continue;
5314 }
5315 ValueList Operands;
5316 // Prepare the operand vector.
5317 for (Value *V : VL)
5318 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
5319 PH->getIncomingBlock(I)));
5320 TE->setOperand(I, Operands);
5321 OperandsVec.push_back(Operands);
5322 }
5323 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
5324 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
5325 return;
5326 }
5327 case Instruction::ExtractValue:
5328 case Instruction::ExtractElement: {
5329 OrdersType CurrentOrder;
5330 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
5331 if (Reuse) {
5332 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
5333 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5334 ReuseShuffleIndicies);
5335 // This is a special case, as it does not gather, but at the same time
5336 // we are not extending buildTree_rec() towards the operands.
5337 ValueList Op0;
5338 Op0.assign(VL.size(), VL0->getOperand(0));
5339 VectorizableTree.back()->setOperand(0, Op0);
5340 return;
5341 }
5342 if (!CurrentOrder.empty()) {
5343 LLVM_DEBUG({
5344 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
5345 "with order";
5346 for (unsigned Idx : CurrentOrder)
5347 dbgs() << " " << Idx;
5348 dbgs() << "\n";
5349 });
5350 fixupOrderingIndices(CurrentOrder);
5351 // Insert new order with initial value 0, if it does not exist,
5352 // otherwise return the iterator to the existing one.
5353 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5354 ReuseShuffleIndicies, CurrentOrder);
5355 // This is a special case, as it does not gather, but at the same time
5356 // we are not extending buildTree_rec() towards the operands.
5357 ValueList Op0;
5358 Op0.assign(VL.size(), VL0->getOperand(0));
5359 VectorizableTree.back()->setOperand(0, Op0);
5360 return;
5361 }
5362 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
5363 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5364 ReuseShuffleIndicies);
5365 BS.cancelScheduling(VL, VL0);
5366 return;
5367 }
5368 case Instruction::InsertElement: {
5369 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
5370
5371 // Check that we have a buildvector and not a shuffle of 2 or more
5372 // different vectors.
5373 ValueSet SourceVectors;
5374 for (Value *V : VL) {
5375 SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
5376 assert(getInsertIndex(V) != std::nullopt &&
5377 "Non-constant or undef index?");
5378 }
5379
5380 if (count_if(VL, [&SourceVectors](Value *V) {
5381 return !SourceVectors.contains(V);
5382 }) >= 2) {
5383 // Found 2nd source vector - cancel.
5384 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
5385 "different source vectors.\n");
5386 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
5387 BS.cancelScheduling(VL, VL0);
5388 return;
5389 }
5390
5391 auto OrdCompare = [](const std::pair<int, int> &P1,
5392 const std::pair<int, int> &P2) {
5393 return P1.first > P2.first;
5394 };
5395 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
5396 decltype(OrdCompare)>
5397 Indices(OrdCompare);
5398 for (int I = 0, E = VL.size(); I < E; ++I) {
5399 unsigned Idx = *getInsertIndex(VL[I]);
5400 Indices.emplace(Idx, I);
5401 }
5402 OrdersType CurrentOrder(VL.size(), VL.size());
5403 bool IsIdentity = true;
5404 for (int I = 0, E = VL.size(); I < E; ++I) {
5405 CurrentOrder[Indices.top().second] = I;
5406 IsIdentity &= Indices.top().second == I;
5407 Indices.pop();
5408 }
5409 if (IsIdentity)
5410 CurrentOrder.clear();
5411 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5412 std::nullopt, CurrentOrder);
5413 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
5414
5415 constexpr int NumOps = 2;
5416 ValueList VectorOperands[NumOps];
5417 for (int I = 0; I < NumOps; ++I) {
5418 for (Value *V : VL)
5419 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
5420
5421 TE->setOperand(I, VectorOperands[I]);
5422 }
5423 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
5424 return;
5425 }
5426 case Instruction::Load: {
5427 // Check that a vectorized load would load the same memory as a scalar
5428 // load. For example, we don't want to vectorize loads that are smaller
5429 // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM
5430 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
5431 // from such a struct, we read/write packed bits disagreeing with the
5432 // unvectorized version.
5433 SmallVector<Value *> PointerOps;
5434 OrdersType CurrentOrder;
5435 TreeEntry *TE = nullptr;
5436 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI,
5437 CurrentOrder, PointerOps)) {
5438 case LoadsState::Vectorize:
5439 if (CurrentOrder.empty()) {
5440 // Original loads are consecutive and do not require reordering.
5441 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5442 ReuseShuffleIndicies);
5443 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
5444 } else {
5445 fixupOrderingIndices(CurrentOrder);
5446 // Need to reorder.
5447 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5448 ReuseShuffleIndicies, CurrentOrder);
5449 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
5450 }
5451 TE->setOperandsInOrder();
5452 break;
5453 case LoadsState::ScatterVectorize:
5454 // Vectorizing non-consecutive loads with `llvm.masked.gather`.
5455 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
5456 UserTreeIdx, ReuseShuffleIndicies);
5457 TE->setOperandsInOrder();
5458 buildTree_rec(PointerOps, Depth + 1, {TE, 0});
5459 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
5460 break;
5461 case LoadsState::Gather:
5462 BS.cancelScheduling(VL, VL0);
5463 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5464 ReuseShuffleIndicies);
5465#ifndef NDEBUG
5466 Type *ScalarTy = VL0->getType();
5467 if (DL->getTypeSizeInBits(ScalarTy) !=
5468 DL->getTypeAllocSizeInBits(ScalarTy))
5469 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
5470 else if (any_of(VL, [](Value *V) {
5471 return !cast<LoadInst>(V)->isSimple();
5472 }))
5473 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
5474 else
5475 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
5476#endif // NDEBUG
5477 break;
5478 }
5479 return;
5480 }
5481 case Instruction::ZExt:
5482 case Instruction::SExt:
5483 case Instruction::FPToUI:
5484 case Instruction::FPToSI:
5485 case Instruction::FPExt:
5486 case Instruction::PtrToInt:
5487 case Instruction::IntToPtr:
5488 case Instruction::SIToFP:
5489 case Instruction::UIToFP:
5490 case Instruction::Trunc:
5491 case Instruction::FPTrunc:
5492 case Instruction::BitCast: {
5493 Type *SrcTy = VL0->getOperand(0)->getType();
5494 for (Value *V : VL) {
5495 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
5496 if (Ty != SrcTy || !isValidElementType(Ty)) {
5497 BS.cancelScheduling(VL, VL0);
5498 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5499 ReuseShuffleIndicies);
5500 LLVM_DEBUG(dbgs()
5501 << "SLP: Gathering casts with different src types.\n");
5502 return;
5503 }
5504 }
5505 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5506 ReuseShuffleIndicies);
5507 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
5508
5509 TE->setOperandsInOrder();
5510 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5511 ValueList Operands;
5512 // Prepare the operand vector.
5513 for (Value *V : VL)
5514 Operands.push_back(cast<Instruction>(V)->getOperand(i));
5515
5516 buildTree_rec(Operands, Depth + 1, {TE, i});
5517 }
5518 return;
5519 }
5520 case Instruction::ICmp:
5521 case Instruction::FCmp: {
5522 // Check that all of the compares have the same predicate.
5523 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
5524 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
5525 Type *ComparedTy = VL0->getOperand(0)->getType();
5526 for (Value *V : VL) {
5527 CmpInst *Cmp = cast<CmpInst>(V);
5528 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
5529 Cmp->getOperand(0)->getType() != ComparedTy) {
5530 BS.cancelScheduling(VL, VL0);
5531 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5532 ReuseShuffleIndicies);
5533 LLVM_DEBUG(dbgs()
5534 << "SLP: Gathering cmp with different predicate.\n");
5535 return;
5536 }
5537 }
5538
5539 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5540 ReuseShuffleIndicies);
5541 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
5542
5543 ValueList Left, Right;
5544 if (cast<CmpInst>(VL0)->isCommutative()) {
5545 // Commutative predicate - collect + sort operands of the instructions
5546 // so that each side is more likely to have the same opcode.
5547 assert(P0 == SwapP0 && "Commutative Predicate mismatch");
5548 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this);
5549 } else {
5550 // Collect operands - commute if it uses the swapped predicate.
5551 for (Value *V : VL) {
5552 auto *Cmp = cast<CmpInst>(V);
5553 Value *LHS = Cmp->getOperand(0);
5554 Value *RHS = Cmp->getOperand(1);
5555 if (Cmp->getPredicate() != P0)
5556 std::swap(LHS, RHS);
5557 Left.push_back(LHS);
5558 Right.push_back(RHS);
5559 }
5560 }
5561 TE->setOperand(0, Left);
5562 TE->setOperand(1, Right);
5563 buildTree_rec(Left, Depth + 1, {TE, 0});
5564 buildTree_rec(Right, Depth + 1, {TE, 1});
5565 return;
5566 }
5567 case Instruction::Select:
5568 case Instruction::FNeg:
5569 case Instruction::Add:
5570 case Instruction::FAdd:
5571 case Instruction::Sub:
5572 case Instruction::FSub:
5573 case Instruction::Mul:
5574 case Instruction::FMul:
5575 case Instruction::UDiv:
5576 case Instruction::SDiv:
5577 case Instruction::FDiv:
5578 case Instruction::URem:
5579 case Instruction::SRem:
5580 case Instruction::FRem:
5581 case Instruction::Shl:
5582 case Instruction::LShr:
5583 case Instruction::AShr:
5584 case Instruction::And:
5585 case Instruction::Or:
5586 case Instruction::Xor: {
5587 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5588 ReuseShuffleIndicies);
5589 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
5590
5591 // Sort operands of the instructions so that each side is more likely to
5592 // have the same opcode.
5593 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
5594 ValueList Left, Right;
5595 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this);
5596 TE->setOperand(0, Left);
5597 TE->setOperand(1, Right);
5598 buildTree_rec(Left, Depth + 1, {TE, 0});
5599 buildTree_rec(Right, Depth + 1, {TE, 1});
5600 return;
5601 }
5602
5603 TE->setOperandsInOrder();
5604 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5605 ValueList Operands;
5606 // Prepare the operand vector.
5607 for (Value *V : VL)
5608 Operands.push_back(cast<Instruction>(V)->getOperand(i));
5609
5610 buildTree_rec(Operands, Depth + 1, {TE, i});
5611 }
5612 return;
5613 }
5614 case Instruction::GetElementPtr: {
5615 // We don't combine GEPs with complicated (nested) indexing.
5616 for (Value *V : VL) {
5617 auto *I = dyn_cast<GetElementPtrInst>(V);
5618 if (!I)
5619 continue;
5620 if (I->getNumOperands() != 2) {
5621 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
5622 BS.cancelScheduling(VL, VL0);
5623 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5624 ReuseShuffleIndicies);
5625 return;
5626 }
5627 }
5628
5629 // We can't combine several GEPs into one vector if they operate on
5630 // different types.
5631 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType();
5632 for (Value *V : VL) {
5633 auto *GEP = dyn_cast<GEPOperator>(V);
5634 if (!GEP)
5635 continue;
5636 Type *CurTy = GEP->getSourceElementType();
5637 if (Ty0 != CurTy) {
5638 LLVM_DEBUG(dbgs()
5639 << "SLP: not-vectorizable GEP (different types).\n");
5640 BS.cancelScheduling(VL, VL0);
5641 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5642 ReuseShuffleIndicies);
5643 return;
5644 }
5645 }
5646
5647 // We don't combine GEPs with non-constant indexes.
5648 Type *Ty1 = VL0->getOperand(1)->getType();
5649 for (Value *V : VL) {
5650 auto *I = dyn_cast<GetElementPtrInst>(V);
5651 if (!I)
5652 continue;
5653 auto *Op = I->getOperand(1);
5654 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
5655 (Op->getType() != Ty1 &&
5656 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
5657 Op->getType()->getScalarSizeInBits() >
5658 DL->getIndexSizeInBits(
5659 V->getType()->getPointerAddressSpace())))) {
5660 LLVM_DEBUG(dbgs()
5661 << "SLP: not-vectorizable GEP (non-constant indexes).\n");
5662 BS.cancelScheduling(VL, VL0);
5663 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5664 ReuseShuffleIndicies);
5665 return;
5666 }
5667 }
5668
5669 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5670 ReuseShuffleIndicies);
5671 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
5672 SmallVector<ValueList, 2> Operands(2);
5673 // Prepare the operand vector for pointer operands.
5674 for (Value *V : VL) {
5675 auto *GEP = dyn_cast<GetElementPtrInst>(V);
5676 if (!GEP) {
5677 Operands.front().push_back(V);
5678 continue;
5679 }
5680 Operands.front().push_back(GEP->getPointerOperand());
5681 }
5682 TE->setOperand(0, Operands.front());
5683 // Need to cast all indices to the same type before vectorization to
5684 // avoid a crash.
5685 // Required to be able to find correct matches between different gather
5686 // nodes and reuse the vectorized values rather than trying to gather them
5687 // again.
5688 int IndexIdx = 1;
5689 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
5690 Type *Ty = all_of(VL,
5691 [VL0Ty, IndexIdx](Value *V) {
5692 auto *GEP = dyn_cast<GetElementPtrInst>(V);
5693 if (!GEP)
5694 return true;
5695 return VL0Ty == GEP->getOperand(IndexIdx)->getType();
5696 })
5697 ? VL0Ty
5698 : DL->getIndexType(cast<GetElementPtrInst>(VL0)
5699 ->getPointerOperandType()
5700 ->getScalarType());
5701 // Prepare the operand vector.
5702 for (Value *V : VL) {
5703 auto *I = dyn_cast<GetElementPtrInst>(V);
5704 if (!I) {
5705 Operands.back().push_back(
5706 ConstantInt::get(Ty, 0, /*isSigned=*/false));
5707 continue;
5708 }
5709 auto *Op = I->getOperand(IndexIdx);
5710 auto *CI = dyn_cast<ConstantInt>(Op);
5711 if (!CI)
5712 Operands.back().push_back(Op);
5713 else
5714 Operands.back().push_back(ConstantExpr::getIntegerCast(
5715 CI, Ty, CI->getValue().isSignBitSet()));
5716 }
5717 TE->setOperand(IndexIdx, Operands.back());
5718
5719 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
5720 buildTree_rec(Operands[I], Depth + 1, {TE, I});
5721 return;
5722 }
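A minimal standalone sketch of the index-normalization rule used above (plain unsigned bit widths stand in for llvm::Type, and PtrIndexBits for the width of DL->getIndexType(); the names are illustrative, not LLVM's API):

    #include <vector>

    // Pick the common index width for a bundle of GEPs: keep the first GEP's
    // index width if every lane already agrees, otherwise fall back to the
    // pointer-index width from the data layout, as the code above does.
    unsigned commonIndexWidth(const std::vector<unsigned> &LaneIndexWidths,
                              unsigned PtrIndexBits) {
      for (unsigned W : LaneIndexWidths)
        if (W != LaneIndexWidths.front())
          return PtrIndexBits; // mismatch: cast every index to the DL type
      return LaneIndexWidths.front(); // all lanes share one type already
    }

For example, commonIndexWidth({32, 64, 32}, 64) yields 64, while commonIndexWidth({32, 32}, 64) keeps 32, mirroring how non-matching constant indices are rewritten with ConstantExpr::getIntegerCast above.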
5723 case Instruction::Store: {
5724 // Check if the stores are consecutive or if we need to swizzle them.
5725 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
5726 // Avoid types that are padded when being allocated as scalars, while
5727 // being packed together in a vector (such as i1).
5728 if (DL->getTypeSizeInBits(ScalarTy) !=
5729 DL->getTypeAllocSizeInBits(ScalarTy)) {
5730 BS.cancelScheduling(VL, VL0);
5731 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5732 ReuseShuffleIndicies);
5733 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
5734 return;
5735 }
5736 // Make sure all stores in the bundle are simple - we can't vectorize
5737 // atomic or volatile stores.
5738 SmallVector<Value *, 4> PointerOps(VL.size());
5739 ValueList Operands(VL.size());
5740 auto POIter = PointerOps.begin();
5741 auto OIter = Operands.begin();
5742 for (Value *V : VL) {
5743 auto *SI = cast<StoreInst>(V);
5744 if (!SI->isSimple()) {
5745 BS.cancelScheduling(VL, VL0);
5746 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5747 ReuseShuffleIndicies);
5748 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
5749 return;
5750 }
5751 *POIter = SI->getPointerOperand();
5752 *OIter = SI->getValueOperand();
5753 ++POIter;
5754 ++OIter;
5755 }
5756
5757 OrdersType CurrentOrder;
5758 // Check the order of pointer operands.
5759 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
5760 Value *Ptr0;
5761 Value *PtrN;
5762 if (CurrentOrder.empty()) {
5763 Ptr0 = PointerOps.front();
5764 PtrN = PointerOps.back();
5765 } else {
5766 Ptr0 = PointerOps[CurrentOrder.front()];
5767 PtrN = PointerOps[CurrentOrder.back()];
5768 }
5769 std::optional<int> Dist =
5770 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
5771 // Check that the sorted pointer operands are consecutive.
5772 if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
5773 if (CurrentOrder.empty()) {
5774 // Original stores are consecutive and do not require reordering.
5775 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
5776 UserTreeIdx, ReuseShuffleIndicies);
5777 TE->setOperandsInOrder();
5778 buildTree_rec(Operands, Depth + 1, {TE, 0});
5779 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
5780 } else {
5781 fixupOrderingIndices(CurrentOrder);
5782 TreeEntry *TE =
5783 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5784 ReuseShuffleIndicies, CurrentOrder);
5785 TE->setOperandsInOrder();
5786 buildTree_rec(Operands, Depth + 1, {TE, 0});
5787 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
5788 }
5789 return;
5790 }
5791 }
5792
5793 BS.cancelScheduling(VL, VL0);
5794 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5795 ReuseShuffleIndicies);
5796 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
5797 return;
5798 }
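To illustrate the consecutiveness test above: after sortPtrAccesses() succeeds, the code only has to check that the first and last sorted pointers are exactly VL.size() - 1 elements apart. A standalone sketch, assuming the per-lane byte offsets are already known (names are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // True if the sorted store offsets form one gap-free run of elements.
    // For distinct offsets that are multiples of the element size, the
    // "last - first == (n - 1) * EltBytes" check above is equivalent to this
    // neighbour-by-neighbour form.
    bool areConsecutiveStores(std::vector<int64_t> ByteOffsets,
                              unsigned EltBytes) {
      std::sort(ByteOffsets.begin(), ByteOffsets.end());
      for (size_t I = 1; I < ByteOffsets.size(); ++I)
        if (ByteOffsets[I] - ByteOffsets[I - 1] != int64_t(EltBytes))
          return false;
      return true;
    }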
5799 case Instruction::Call: {
5800 // Check if the calls are all to the same vectorizable intrinsic or
5801 // library function.
5802 CallInst *CI = cast<CallInst>(VL0);
5803 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5804
5805 VFShape Shape = VFShape::get(
5806 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
5807 false /*HasGlobalPred*/);
5808 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
5809
5810 if (!VecFunc && !isTriviallyVectorizable(ID)) {
5811 BS.cancelScheduling(VL, VL0);
5812 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5813 ReuseShuffleIndicies);
5814 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
5815 return;
5816 }
5817 Function *F = CI->getCalledFunction();
5818 unsigned NumArgs = CI->arg_size();
5819 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
5820 for (unsigned j = 0; j != NumArgs; ++j)
5821 if (isVectorIntrinsicWithScalarOpAtArg(ID, j))
5822 ScalarArgs[j] = CI->getArgOperand(j);
5823 for (Value *V : VL) {
5824 CallInst *CI2 = dyn_cast<CallInst>(V);
5825 if (!CI2 || CI2->getCalledFunction() != F ||
5826 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
5827 (VecFunc &&
5828 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
5829 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
5830 BS.cancelScheduling(VL, VL0);
5831 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5832 ReuseShuffleIndicies);
5833 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
5834 << "\n");
5835 return;
5836 }
5837 // Some intrinsics have scalar arguments, and these must be the same
5838 // across the bundle for the calls to be vectorized.
5839 for (unsigned j = 0; j != NumArgs; ++j) {
5840 if (isVectorIntrinsicWithScalarOpAtArg(ID, j)) {
5841 Value *A1J = CI2->getArgOperand(j);
5842 if (ScalarArgs[j] != A1J) {
5843 BS.cancelScheduling(VL, VL0);
5844 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5845 ReuseShuffleIndicies);
5846 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
5847 << " argument " << ScalarArgs[j] << "!=" << A1J
5848 << "\n");
5849 return;
5850 }
5851 }
5852 }
5853 // Verify that the bundle operands are identical between the two calls.
5854 if (CI->hasOperandBundles() &&
5855 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
5856 CI->op_begin() + CI->getBundleOperandsEndIndex(),
5857 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
5858 BS.cancelScheduling(VL, VL0);
5859 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5860 ReuseShuffleIndicies);
5861 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
5862 << *CI << "!=" << *V << '\n');
5863 return;
5864 }
5865 }
5866
5867 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5868 ReuseShuffleIndicies);
5869 TE->setOperandsInOrder();
5870 for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
5871 // For scalar operands there is no need to create an entry, since they
5872 // do not need to be vectorized.
5873 if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
5874 continue;
5875 ValueList Operands;
5876 // Prepare the operand vector.
5877 for (Value *V : VL) {
5878 auto *CI2 = cast<CallInst>(V);
5879 Operands.push_back(CI2->getArgOperand(i));
5880 }
5881 buildTree_rec(Operands, Depth + 1, {TE, i});
5882 }
5883 return;
5884 }
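A toy sketch of the per-bundle call checks above: every lane must call the same function, and any argument position that stays scalar after vectorization (isVectorIntrinsicWithScalarOpAtArg) must hold the identical operand in every lane. ToyCall and the operand encoding are illustrative stand-ins, not LLVM types:

    #include <cstddef>
    #include <string>
    #include <vector>

    struct ToyCall {
      std::string Callee;     // stands in for the called Function
      std::vector<int> Args;  // operand identities, one per argument
    };

    bool callBundleMatches(const std::vector<ToyCall> &Bundle,
                           const std::vector<bool> &IsScalarArg) {
      const ToyCall &First = Bundle.front();
      for (const ToyCall &C : Bundle) {
        if (C.Callee != First.Callee || C.Args.size() != First.Args.size())
          return false; // "mismatched calls": the bundle is gathered instead
        for (std::size_t J = 0; J < C.Args.size(); ++J)
          if (IsScalarArg[J] && C.Args[J] != First.Args[J])
            return false; // scalar operand differs across lanes
      }
      return true;
    }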
5885 case Instruction::ShuffleVector: {
5886 // If this is not an alternate sequence of opcodes like add-sub
5887 // then do not vectorize this instruction.
5888 if (!S.isAltShuffle()) {
5889 BS.cancelScheduling(VL, VL0);
5890 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5891 ReuseShuffleIndicies);
5892 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
5893 return;
5894 }
5895 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5896 ReuseShuffleIndicies);
5897 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
5898
5899 // Reorder operands if reordering would enable vectorization.
5900 auto *CI = dyn_cast<CmpInst>(VL0);
5901 if (isa<BinaryOperator>(VL0) || CI) {
5902 ValueList Left, Right;
5903 if (!CI || all_of(VL, [](Value *V) {
5904 return cast<CmpInst>(V)->isCommutative();
5905 })) {
5906 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE,
5907 *this);
5908 } else {
5909 auto *MainCI = cast<CmpInst>(S.MainOp);
5910 auto *AltCI = cast<CmpInst>(S.AltOp);
5911 CmpInst::Predicate MainP = MainCI->getPredicate();
5912 CmpInst::Predicate AltP = AltCI->getPredicate();
5913 assert(MainP != AltP &&
5914 "Expected different main/alternate predicates.");
5915 // Collect operands - commute if it uses the swapped predicate or
5916 // alternate operation.
5917 for (Value *V : VL) {
5918 auto *Cmp = cast<CmpInst>(V);
5919 Value *LHS = Cmp->getOperand(0);
5920 Value *RHS = Cmp->getOperand(1);
5921
5922 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) {
5923 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
5924 std::swap(LHS, RHS);
5925 } else {
5926 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
5927 std::swap(LHS, RHS);
5928 }
5929 Left.push_back(LHS);
5930 Right.push_back(RHS);
5931 }
5932 }
5933 TE->setOperand(0, Left);
5934 TE->setOperand(1, Right);
5935 buildTree_rec(Left, Depth + 1, {TE, 0});
5936 buildTree_rec(Right, Depth + 1, {TE, 1});
5937 return;
5938 }
5939
5940 TE->setOperandsInOrder();
5941 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5942 ValueList Operands;
5943 // Prepare the operand vector.
5944 for (Value *V : VL)
5945 Operands.push_back(cast<Instruction>(V)->getOperand(i));
5946
5947 buildTree_rec(Operands, Depth + 1, {TE, i});
5948 }
5949 return;
5950 }
5951 default:
5952 BS.cancelScheduling(VL, VL0);
5953 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
5954 ReuseShuffleIndicies);
5955 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
5956 return;
5957 }
5958}
5959
5960 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
5961 unsigned N = 1;
5962 Type *EltTy = T;
5963
5964 while (isa<StructType, ArrayType, VectorType>(EltTy)) {
5965 if (auto *ST = dyn_cast<StructType>(EltTy)) {
5966 // Check that struct is homogeneous.
5967 for (const auto *Ty : ST->elements())
5968 if (Ty != *ST->element_begin())
5969 return 0;
5970 N *= ST->getNumElements();
5971 EltTy = *ST->element_begin();
5972 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
5973 N *= AT->getNumElements();
5974 EltTy = AT->getElementType();
5975 } else {
5976 auto *VT = cast<FixedVectorType>(EltTy);
5977 N *= VT->getNumElements();
5978 EltTy = VT->getElementType();
5979 }
5980 }
5981
5982 if (!isValidElementType(EltTy))
5983 return 0;
5984 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
5985 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
5986 return 0;
5987 return N;
5988}
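A standalone sketch of the flattening loop in canMapToVector() above: nested homogeneous aggregates multiply out into a single element count, and a heterogeneous struct stops the walk. ToyType is an illustrative stand-in for llvm::Type:

    struct ToyType {
      unsigned NumElements = 0;         // 0 marks a leaf (scalar) type
      const ToyType *Element = nullptr; // homogeneous element type, if any
    };

    // Returns how many leaf scalars the aggregate flattens to; the analogue
    // of [4 x <2 x float>] flattens to 8 floats.
    unsigned flattenedElementCount(const ToyType *T) {
      unsigned N = 1;
      while (T->Element) { // struct/array/vector with one element type
        N *= T->NumElements;
        T = T->Element;
      }
      return N;
    }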
5989
5990 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
5991 SmallVectorImpl<unsigned> &CurrentOrder) const {
5992 const auto *It = find_if(VL, [](Value *V) {
5993 return isa<ExtractElementInst, ExtractValueInst>(V);
5994 });
5995 assert(It != VL.end() && "Expected at least one extract instruction.");
5996 auto *E0 = cast<Instruction>(*It);
5997 assert(all_of(VL,
5998 [](Value *V) {
5999 return isa<UndefValue, ExtractElementInst, ExtractValueInst>(
6000 V);
6001 }) &&
6002 "Invalid opcode");
6003 // Check if all of the extracts come from the same vector and from the
6004 // correct offset.
6005 Value *Vec = E0->getOperand(0);
6006
6007 CurrentOrder.clear();
6008
6009 // We have to extract from a vector/aggregate with the same number of elements.
6010 unsigned NElts;
6011 if (E0->getOpcode() == Instruction::ExtractValue) {
6012 const DataLayout &DL = E0->getModule()->getDataLayout();
6013 NElts = canMapToVector(Vec->getType(), DL);
6014 if (!NElts)
6015 return false;
6016 // Check if load can be rewritten as load of vector.
6017 LoadInst *LI = dyn_cast<LoadInst>(Vec);
6018 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
6019 return false;
6020 } else {
6021 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
6022 }
6023
6024 if (NElts != VL.size())
6025 return false;
6026
6027 // Check that all of the indices extract from the correct offset.
6028 bool ShouldKeepOrder = true;
6029 unsigned E = VL.size();
6030 // Assign to all items the initial value E so we can check if the extract
6031 // instruction index was used already.
6032 // Also, later we can check that all the indices are used and we have a
6033 // consecutive access in the extract instructions, by checking that no
6034 // element of CurrentOrder still has value E.
6035 CurrentOrder.assign(E, E);
6036 unsigned I = 0;
6037 for (; I < E; ++I) {
6038 auto *Inst = dyn_cast<Instruction>(VL[I]);
6039 if (!Inst)
6040 continue;
6041 if (Inst->getOperand(0) != Vec)
6042 break;
6043 if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
6044 if (isa<UndefValue>(EE->getIndexOperand()))
6045 continue;
6046 std::optional<unsigned> Idx = getExtractIndex(Inst);
6047 if (!Idx)
6048 break;
6049 const unsigned ExtIdx = *Idx;
6050 if (ExtIdx != I) {
6051 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E)
6052 break;
6053 ShouldKeepOrder = false;
6054 CurrentOrder[ExtIdx] = I;
6055 } else {
6056 if (CurrentOrder[I] != E)
6057 break;
6058 CurrentOrder[I] = I;
6059 }
6060 }
6061 if (I < E) {
6062 CurrentOrder.clear();
6063 return false;
6064 }
6065 if (ShouldKeepOrder)
6066 CurrentOrder.clear();
6067
6068 return ShouldKeepOrder;
6069}
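The ordering walk above can be summarized in a standalone sketch: record the lane that uses each extract index, reject out-of-range or repeated indices, and report whether the extracts are already an identity permutation. Indices[] abstracts away the undef and foreign-vector cases the real code also handles:

    #include <optional>
    #include <vector>

    std::optional<std::vector<unsigned>>
    computeExtractOrder(const std::vector<unsigned> &Indices, bool &IsIdentity) {
      unsigned E = Indices.size();
      std::vector<unsigned> Order(E, E); // E means "slot not used yet"
      IsIdentity = true;
      for (unsigned I = 0; I < E; ++I) {
        unsigned ExtIdx = Indices[I];
        if (ExtIdx >= E || Order[ExtIdx] != E)
          return std::nullopt; // out of range or used twice: cannot reuse
        Order[ExtIdx] = I;
        IsIdentity &= ExtIdx == I;
      }
      return Order;
    }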
6070
6071 bool BoUpSLP::areAllUsersVectorized(Instruction *I,
6072 ArrayRef<Value *> VectorizedVals) const {
6073 return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
6074 all_of(I->users(), [this](User *U) {
6075 return ScalarToTreeEntry.count(U) > 0 ||
6076 isVectorLikeInstWithConstOps(U) ||
6077 (isa<ExtractElementInst>(U) && MustGather.contains(U));
6078 });
6079}
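A toy version of the same test, with integers standing in for Value/User pointers: a scalar can be considered dead after vectorization only if every one of its users is accounted for by the tree, which is the property the cost model later exploits to take credit for dead extracts:

    #include <set>
    #include <vector>

    bool allUsersVectorized(const std::vector<int> &Users,
                            const std::set<int> &InTree) {
      for (int U : Users)
        if (!InTree.count(U))
          return false; // a scalar user survives, so the value stays live
      return true;
    }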
6080
6081 static std::pair<InstructionCost, InstructionCost>
6082 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
6083 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
6084 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6085
6086 // Calculate the cost of the scalar and vector calls.
6087 SmallVector<Type *, 4> VecTys;
6088 for (Use &Arg : CI->args())
6089 VecTys.push_back(
6090 FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
6091 FastMathFlags FMF;
6092 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
6093 FMF = FPCI->getFastMathFlags();
6094 SmallVector<const Value *> Arguments(CI->args());
6095 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
6096 dyn_cast<IntrinsicInst>(CI));
6097 auto IntrinsicCost =
6098 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
6099
6100 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
6101 VecTy->getNumElements())),
6102 false /*HasGlobalPred*/);
6103 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
6104 auto LibCost = IntrinsicCost;
6105 if (!CI->isNoBuiltin() && VecFunc) {
6106 // Calculate the cost of the vector library call.
6107 // If the corresponding vector call is cheaper, return its cost.
6108 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
6109 TTI::TCK_RecipThroughput);
6110 }
6111 return {IntrinsicCost, LibCost};
6112}
6113
6114 /// Compute the cost of creating a vector of type \p VecTy containing the
6115 /// extracted values from \p VL.
6116 static InstructionCost
6117 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
6118 TargetTransformInfo::ShuffleKind ShuffleKind,
6119 ArrayRef<int> Mask, TargetTransformInfo &TTI) {
6120 unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
6121
6122 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
6123 VecTy->getNumElements() < NumOfParts)
6124 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
6125
6126 bool AllConsecutive = true;
6127 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
6128 unsigned Idx = -1;
6129 InstructionCost Cost = 0;
6130
6131 // Process extracts in blocks of EltsPerVector to check if the source vector
6132 // operand can be re-used directly. If not, add the cost of creating a shuffle
6133 // to extract the values into a vector register.
6134 SmallVector<int> RegMask(EltsPerVector, UndefMaskElem);
6135 for (auto *V : VL) {
6136 ++Idx;
6137
6138 // Reached the start of a new vector register.
6139 if (Idx % EltsPerVector == 0) {
6140 RegMask.assign(EltsPerVector, UndefMaskElem);
6141 AllConsecutive = true;
6142 continue;
6143 }
6144
6145 // Need to exclude undefs from analysis.
6146 if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem)
6147 continue;
6148
6149 // Check whether all extracts for a vector register on the target
6150 // directly extract values in order.
6151 unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
6152 if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) {
6153 unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
6154 AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
6155 CurrentIdx % EltsPerVector == Idx % EltsPerVector;
6156 RegMask[Idx % EltsPerVector] = CurrentIdx % EltsPerVector;
6157 }
6158
6159 if (AllConsecutive)
6160 continue;
6161
6162 // Skip all indices, except for the last index per vector block.
6163 if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
6164 continue;
6165
6166 // If we have a series of extracts which are not consecutive and hence
6167 // cannot re-use the source vector register directly, compute the shuffle
6168 // cost to extract the vector with EltsPerVector elements.
6169 Cost += TTI.getShuffleCost(
6170 TargetTransformInfo::SK_PermuteSingleSrc,
6171 FixedVectorType::get(VecTy->getElementType(), EltsPerVector), RegMask);
6172 }
6173 return Cost;
6174}
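A standalone sketch of the per-register walk above: split the extract indices into blocks of EltsPerVector and charge one permutation for every block that is not already a consecutive, correctly aligned run (undef handling is omitted; names are illustrative):

    #include <algorithm>
    #include <vector>

    unsigned countShuffledBlocks(const std::vector<unsigned> &ExtractIdx,
                                 unsigned EltsPerVector) {
      unsigned Shuffles = 0;
      for (unsigned Base = 0; Base < ExtractIdx.size(); Base += EltsPerVector) {
        unsigned End = std::min<unsigned>(Base + EltsPerVector,
                                          ExtractIdx.size());
        bool Consecutive = true;
        for (unsigned I = Base + 1; I < End; ++I)
          Consecutive &= ExtractIdx[I] == ExtractIdx[I - 1] + 1 &&
                         ExtractIdx[I] % EltsPerVector == I % EltsPerVector;
        if (!Consecutive)
          ++Shuffles; // this block needs an SK_PermuteSingleSrc-style shuffle
      }
      return Shuffles;
    }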
6175
6176 /// Build a shuffle mask for a shuffle graph entry and the lists of main and
6177 /// alternate operation operands.
6178 static void
6179 buildShuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
6180 ArrayRef<int> ReusesIndices,
6181 const function_ref<bool(Instruction *)> IsAltOp,
6182 SmallVectorImpl<int> &Mask,
6183 SmallVectorImpl<Value *> *OpScalars = nullptr,
6184 SmallVectorImpl<Value *> *AltScalars = nullptr) {
6185 unsigned Sz = VL.size();
6186 Mask.assign(Sz, UndefMaskElem);
6187 SmallVector<int> OrderMask;
6188 if (!ReorderIndices.empty())
6189 inversePermutation(ReorderIndices, OrderMask);
6190 for (unsigned I = 0; I < Sz; ++I) {
6191 unsigned Idx = I;
6192 if (!ReorderIndices.empty())
6193 Idx = OrderMask[I];
6194 auto *OpInst = cast<Instruction>(VL[Idx]);
6195 if (IsAltOp(OpInst)) {
6196 Mask[I] = Sz + Idx;
6197 if (AltScalars)
6198 AltScalars->push_back(OpInst);
6199 } else {
6200 Mask[I] = Idx;
6201 if (OpScalars)
6202 OpScalars->push_back(OpInst);
6203 }
6204 }
6205 if (!ReusesIndices.empty()) {
6206 SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
6207 transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
6208 return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
6209 });
6210 Mask.swap(NewMask);
6211 }
6212}
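To make the mask construction above concrete, here is a standalone sketch for the common add/sub alternate sequence, with -1 playing UndefMaskElem and the reorder/reuse indices left out:

    #include <functional>
    #include <vector>

    // Main-op lanes select from the first (main) vector at index I; alternate
    // lanes select from the second vector at index Sz + I.
    std::vector<int> altShuffleMask(unsigned Sz,
                                    const std::function<bool(unsigned)> &IsAltOp) {
      std::vector<int> Mask(Sz, -1);
      for (unsigned I = 0; I < Sz; ++I)
        Mask[I] = IsAltOp(I) ? int(Sz + I) : int(I);
      return Mask;
    }

For Sz = 4 with the odd lanes doing the alternate op, this yields <0, 5, 2, 7>: a single shufflevector of the vectorized main and alternate results.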
6213
6214 static bool isAlternateInstruction(const Instruction *I,
6215 const Instruction *MainOp,
6216 const Instruction *AltOp,
6217 const TargetLibraryInfo &TLI) {
6218 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) {
6219 auto *AltCI = cast<CmpInst>(AltOp);
6220 CmpInst::Predicate MainP = MainCI->getPredicate();
6221 CmpInst::Predicate AltP = AltCI->getPredicate();
6222 assert(MainP != AltP && "Expected different main/alternate predicates.");
6223 auto *CI = cast<CmpInst>(I);
6224 if (isCmpSameOrSwapped(MainCI, CI, TLI))
6225 return false;
6226 if (isCmpSameOrSwapped(AltCI, CI, TLI))
6227 return true;
6228 CmpInst::Predicate P = CI->getPredicate();
6229 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P);
6230
6231 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) &&
6232 "CmpInst expected to match either main or alternate predicate or "
6233 "their swap.");
6234 (void)AltP;
6235 return MainP != P && MainP != SwappedP;
6236 }
6237 return I->getOpcode() == AltOp->getOpcode();
6238}
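A standalone sketch of the compare classification above, using a toy predicate enum instead of CmpInst::Predicate: a compare belongs to the main group when its predicate, or the operand-swapped form of it, matches the main predicate; otherwise it is an alternate:

    enum class ToyPred { LT, GT, LE, GE, EQ };

    // Operand-swapped equivalent, e.g. (a < b) is (b > a).
    ToyPred swapped(ToyPred P) {
      switch (P) {
      case ToyPred::LT: return ToyPred::GT;
      case ToyPred::GT: return ToyPred::LT;
      case ToyPred::LE: return ToyPred::GE;
      case ToyPred::GE: return ToyPred::LE;
      case ToyPred::EQ: return ToyPred::EQ;
      }
      return P;
    }

    // Mirrors the final "return MainP != P && MainP != SwappedP" above.
    bool isAlternatePred(ToyPred P, ToyPred MainP) {
      return P != MainP && swapped(P) != MainP;
    }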
6239
6240 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> VL,
6241 unsigned OpIdx) {
6242 assert(!VL.empty());
6243 const auto *I0 = cast<Instruction>(*find_if(VL, Instruction::classof));
6244 const auto *Op0 = I0->getOperand(OpIdx);
6245
6246 const bool IsConstant = all_of(VL, [&](Value *V) {
6247 // TODO: We should allow undef elements here
6248 const auto *I = dyn_cast<Instruction>(V);
6249 if (!I)
6250 return true;
6251 auto *Op = I->getOperand(OpIdx);
6252 return isConstant(Op) && !isa<UndefValue>(Op);
6253 });
6254 const bool IsUniform = all_of(VL, [&](Value *V) {
6255 // TODO: We should allow undef elements here
6256 const auto *I = dyn_cast<Instruction>(V);
6257 if (!I)
6258 return false;
6259 return I->getOperand(OpIdx) == Op0;
6260 });
6261 const bool IsPowerOfTwo = all_of(VL, [&](Value *V) {
6262 // TODO: We should allow undef elements here
6263 const auto *I = dyn_cast<Instruction>(V);
6264 if (!I) {
6265 assert((isa<UndefValue>(V) ||
6266 I0->getOpcode() == Instruction::GetElementPtr) &&
6267 "Expected undef or GEP.");
6268 return true;
6269 }
6270 auto *Op = I->getOperand(OpIdx);
6271 if (auto *CI = dyn_cast<ConstantInt>(Op))
6272 return CI->getValue().isPowerOf2();
6273 return false;
6274 });
6275 const bool IsNegatedPowerOfTwo = all_of(VL, [&](Value *V) {
6276 // TODO: We should allow undef elements here
6277 const auto *I = dyn_cast<Instruction>(V);
6278 if (!I) {
6279 assert((isa<UndefValue>(V) ||
6280 I0->getOpcode() == Instruction::GetElementPtr) &&
6281 "Expected undef or GEP.");
6282 return true;
6283 }
6284 const auto *Op = I->getOperand(OpIdx);
6285 if (auto *CI = dyn_cast<ConstantInt>(Op))
6286 return CI->getValue().isNegatedPowerOf2();
6287 return false;
6288 });
6289
6290 TTI::OperandValueKind VK = TTI::OK_AnyValue;
6291 if (IsConstant && IsUniform)
6292 VK = TTI::OK_UniformConstantValue;
6293 else if (IsConstant)
6294 VK = TTI::OK_NonUniformConstantValue;
6295 else if (IsUniform)
6296 VK = TTI::OK_UniformValue;
6297
6298 TTI::OperandValueProperties VP = TTI::OP_None;
6299 VP = IsPowerOfTwo ? TTI::OP_PowerOf2 : VP;
6300 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP;
6301
6302 return {VK, VP};
6303}
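The classification above degrades from the strongest operand kind to the weakest. A standalone sketch over plain values, with IsConst marking which lanes hold constants (the power-of-two property checks are omitted; names are illustrative):

    #include <cstddef>
    #include <vector>

    enum class ToyOperandKind { UniformConstant, NonUniformConstant, Uniform, Any };

    ToyOperandKind classifyOperands(const std::vector<int> &Ops,
                                    const std::vector<bool> &IsConst) {
      bool AllConst = true, AllSame = true;
      for (std::size_t I = 0; I < Ops.size(); ++I) {
        AllConst &= IsConst[I];
        AllSame &= Ops[I] == Ops.front();
      }
      if (AllConst && AllSame)
        return ToyOperandKind::UniformConstant;
      if (AllConst)
        return ToyOperandKind::NonUniformConstant;
      if (AllSame)
        return ToyOperandKind::Uniform;
      return ToyOperandKind::Any;
    }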
6304
6305 namespace {
6306 /// The base class for shuffle instruction emission and shuffle cost estimation.
6307 class BaseShuffleAnalysis {
6308 protected:
6309 /// Checks if the mask is an identity mask.
6310 /// \param IsStrict if true, the function returns false if the mask size does
6311 /// not match the vector size.
6312 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy,
6313 bool IsStrict) {
6314 int Limit = Mask.size();
6315 int VF = VecTy->getNumElements();
6316 return (VF == Limit || !IsStrict) &&
6317 all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) &&
6318 ShuffleVectorInst::isIdentityMask(Mask);
6319 }
6320
6321 /// Tries to combine 2 different masks into a single one.
6322 /// \param LocalVF Vector length of the permuted input vector. \p Mask may
6323 /// change the size of the vector, \p LocalVF is the original size of the
6324 /// shuffled vector.
6325 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask,
6326 ArrayRef<int> ExtMask) {
6327 unsigned VF = Mask.size();
6328 SmallVector<int> NewMask(ExtMask.size(), UndefMaskElem);
6329 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
6330 if (ExtMask[I] == UndefMaskElem)
6331 continue;
6332 int MaskedIdx = Mask[ExtMask[I] % VF];
6333 NewMask[I] =
6334 MaskedIdx == UndefMaskElem ? UndefMaskElem : MaskedIdx % LocalVF;
6335 }
6336 Mask.swap(NewMask);
6337 }
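A standalone sketch of combineMasks() on plain vectors: composing the outer ExtMask with the inner Mask yields one mask that indexes the original input directly, with -1 playing UndefMaskElem:

    #include <cstddef>
    #include <vector>

    std::vector<int> composeMasks(unsigned LocalVF, const std::vector<int> &Mask,
                                  const std::vector<int> &ExtMask) {
      const int VF = int(Mask.size());
      std::vector<int> NewMask(ExtMask.size(), -1);
      for (std::size_t I = 0; I < ExtMask.size(); ++I) {
        if (ExtMask[I] == -1)
          continue; // undef lanes stay undef
        int Inner = Mask[ExtMask[I] % VF];
        NewMask[I] = Inner == -1 ? -1 : Inner % int(LocalVF);
      }
      return NewMask;
    }

For example, with Mask = {1, 0} over %0 and ExtMask = {1, 1} selecting from that shuffle, composeMasks(2, {1, 0}, {1, 1}) gives {0, 0}: a direct splat of element 0 of %0, with no intermediate shuffle.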
6338
6339 /// Looks through shuffles, trying to reduce the final number of shuffles in
6340 /// the code. The function looks through the previously emitted shuffle
6341 /// instructions and properly marks indices in the mask as undef.
6342 /// For example, given the code
6343 /// \code
6344 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
6345 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
6346 /// \endcode
6347 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will
6348 /// look through %s1 and %s2 and select vectors %0 and %1 with mask
6349 /// <0, 1, 2, 3> for the shuffle.
6350 /// If 2 operands are of different size, the smallest one will be resized and
6351 /// the mask recalculated properly.
6352 /// For example, given the code
6353 /// \code
6354 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
6355 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
6356 /// \endcode
6357 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will
6358 /// look through %s1 and %s2 and select vectors %0 and %1 with mask
6359 /// <0, 1, 2, 3> for the shuffle.
6360 /// So, it tries to transform permutations to simple vector merge, if
6361 /// possible.
6362 /// \param V The input vector which must be shuffled using the given \p Mask.
6363 /// If the better candidate is found, \p V is set to this best candidate
6364 /// vector.
6365 /// \param Mask The input mask for the shuffle. If the best candidate is found
6366 /// during looking-through-shuffles attempt, it is updated accordingly.
6367 /// \param SinglePermute true if the shuffle operation is originally a
6368 /// single-value-permutation. In this case the look-through-shuffles procedure
6369 /// may look for resizing shuffles as the best candidates.
6370 /// \return true if the shuffle results in the non-resizing identity shuffle
6371 /// (and thus can be ignored), false otherwise.
6372 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask,
6373 bool SinglePermute) {
6374 Value *Op = V;
6375 ShuffleVectorInst *IdentityOp = nullptr;
6376 SmallVector<int> IdentityMask;
6377 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) {
6378 // Exit if not a fixed vector type or a size-changing shuffle.
6379 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType());
6380 if (!SVTy)
6381 break;
6382 // Remember the identity or broadcast mask, if it is not a resizing
6383 // shuffle. If no better candidates are found, this Op and Mask will be
6384 // used in the final shuffle.
6385 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) {
6386 if (!IdentityOp || !SinglePermute ||
6387 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) &&
6388 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask))) {
6389 IdentityOp = SV;
6390 // Store the current mask in IdentityMask so that we do not lose this
6391 // info later if IdentityOp is selected as the best candidate for the
6392 // permutation.
6393 IdentityMask.assign(Mask);
6394 }
6395 }
6396 // Remember the broadcast mask. If no better candidates are found, this Op
6397 // and Mask will be used in the final shuffle.
6398 // Zero splat can be used as identity too, since it might be used with
6399 // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling.
6400 // E.g. if we need to shuffle the vector with the mask <3, 1, 2, 0>, which
6401 // is expensive, and the analysis finds out that the source vector is just
6402 // a broadcast, this original mask can be transformed to the identity mask
6403 // <0, 1, 2, 3>.
6404 // \code
6405 // %0 = shuffle %v, poison, zeroinitalizer
6406 // %res = shuffle %0, poison, <3, 1, 2, 0>
6407 // \endcode
6408 // may be transformed to
6409 // \code
6410 // %0 = shuffle %v, poison, zeroinitalizer
6411 // %res = shuffle %0, poison, <0, 1, 2, 3>
6412 // \endcode
6413 if (SV->isZeroEltSplat()) {
6414 IdentityOp = SV;
6415 IdentityMask.assign(Mask);
6416 }
6417 int LocalVF = Mask.size();
6418 if (auto *SVOpTy =
6419 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType()))
6420 LocalVF = SVOpTy->getNumElements();
6421 SmallVector<int> ExtMask(Mask.size(), UndefMaskElem);
6422 for (auto [Idx, I] : enumerate(Mask)) {
6423 if (I == UndefMaskElem)
6424 continue;
6425 ExtMask[Idx] = SV->getMaskValue(I);
6426 }
6427 bool IsOp1Undef =
6428 isUndefVector(SV->getOperand(0),
6429 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg))
6430 .all();
6431 bool IsOp2Undef =
6432 isUndefVector(SV->getOperand(1),
6433 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg))
6434 .all();
6435 if (!IsOp1Undef && !IsOp2Undef) {
6436 // Update mask and mark undef elems.
6437 for (int &I : Mask) {
6438 if (I == UndefMaskElem)
6439 continue;
6440 if (SV->getMaskValue(I % SV->getShuffleMask().size()) ==
6441 UndefMaskElem)
6442 I = UndefMaskElem;
6443 }
6444 break;
6445 }
6446 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(),
6447 SV->getShuffleMask().end());
6448 combineMasks(LocalVF, ShuffleMask, Mask);
6449 Mask.swap(ShuffleMask);
6450 if (IsOp2Undef)
6451 Op = SV->getOperand(0);
6452 else
6453 Op = SV->getOperand(1);
6454 }
6455 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType());
6456 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute)) {
6457 if (IdentityOp) {
6458 V = IdentityOp;
6459 assert(Mask.size() == IdentityMask.size() &&
6460 "Expected masks of same sizes.");
6461 // Clear known poison elements.
6462 for (auto [I, Idx] : enumerate(Mask))
6463 if (Idx == UndefMaskElem)
6464 IdentityMask[I] = UndefMaskElem;
6465 Mask.swap(IdentityMask);
6466 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V);
6467 return SinglePermute &&
6468 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()),
6469 /*IsStrict=*/true) ||
6470 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() &&
6471 Shuffle->isZeroEltSplat() &&
6472 ShuffleVectorInst::isZeroEltSplatMask(Mask)));
6473 }
6474 V = Op;
6475 return false;
6476 }
6477 V = Op;
6478 return true;
6479 }
6480
6481 /// Smart shuffle instruction emission, walks through shuffle trees and
6482 /// tries to find the best matching vector for the actual shuffle
6483 /// instruction.
6484 template <typename ShuffleBuilderTy>
6485 static Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask,
6486 ShuffleBuilderTy &Builder) {
6487 assert(V1 && "Expected at least one vector value.");
6488 int VF = Mask.size();
6489 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType()))
6490 VF = FTy->getNumElements();
6491 if (V2 &&
6492 !isUndefVector(V2, buildUseMask(VF, Mask, UseMask::SecondArg)).all()) {
6493 // Peek through shuffles.
6494 Value *Op1 = V1;
6495 Value *Op2 = V2;
6496 int VF =
6497 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
6498 SmallVector<int> CombinedMask1(Mask.size(), UndefMaskElem);
6499 SmallVector<int> CombinedMask2(Mask.size(), UndefMaskElem);
6500 for (int I = 0, E = Mask.size(); I < E; ++I) {
6501 if (Mask[I] < VF)
6502 CombinedMask1[I] = Mask[I];
6503 else
6504 CombinedMask2[I] = Mask[I] - VF;
6505 }
6506 Value *PrevOp1;
6507 Value *PrevOp2;
6508 do {
6509 PrevOp1 = Op1;
6510 PrevOp2 = Op2;
6511 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false);
6512 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false);
6513 // Check if we have 2 resizing shuffles - need to peek through operands
6514 // again.
6515 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1))
6516 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) {
6517 SmallVector<int> ExtMask1(Mask.size(), UndefMaskElem);
6518 for (auto [Idx, I] : enumerate(CombinedMask1)) {
6519 if (I == UndefMaskElem)
6520 continue;
6521 ExtMask1[Idx] = SV1->getMaskValue(I);
6522 }
6523 SmallBitVector UseMask1 = buildUseMask(
6524 cast<FixedVectorType>(SV1->getOperand(1)->getType())
6525 ->getNumElements(),
6526 ExtMask1, UseMask::SecondArg);
6527 SmallVector<int> ExtMask2(CombinedMask2.size(), UndefMaskElem);
6528 for (auto [Idx, I] : enumerate(CombinedMask2)) {
6529 if (I == UndefMaskElem)
6530 continue;
6531 ExtMask2[Idx] = SV2->getMaskValue(I);
6532 }
6533 SmallBitVector UseMask2 = buildUseMask(
6534 cast<FixedVectorType>(SV2->getOperand(1)->getType())
6535 ->getNumElements(),
6536 ExtMask2, UseMask::SecondArg);
6537 if (SV1->getOperand(0)->getType() ==
6538 SV2->getOperand(0)->getType() &&
6539 SV1->getOperand(0)->getType() != SV1->getType() &&
6540 isUndefVector(SV1->getOperand(1), UseMask1).all() &&
6541 isUndefVector(SV2->getOperand(1), UseMask2).all()) {
6542 Op1 = SV1->getOperand(0);
6543 Op2 = SV2->getOperand(0);
6544 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(),
6545 SV1->getShuffleMask().end());
6546 int LocalVF = ShuffleMask1.size();
6547 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType()))
6548 LocalVF = FTy->getNumElements();
6549 combineMasks(LocalVF, ShuffleMask1, CombinedMask1);
6550 CombinedMask1.swap(ShuffleMask1);
6551 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(),
6552 SV2->getShuffleMask().end());
6553 LocalVF = ShuffleMask2.size();
6554 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType()))
6555 LocalVF = FTy->getNumElements();
6556 combineMasks(LocalVF, ShuffleMask2, CombinedMask2);
6557 CombinedMask2.swap(ShuffleMask2);
6558 }
6559 }
6560 } while (PrevOp1 != Op1 || PrevOp2 != Op2);
6561 Builder.resizeToMatch(Op1, Op2);
6562 VF = std::max(cast<VectorType>(Op1->getType())
6563 ->getElementCount()
6564 .getKnownMinValue(),
6565 cast<VectorType>(Op2->getType())
6566 ->getElementCount()
6567 .getKnownMinValue());
6568 for (int I = 0, E = Mask.size(); I < E; ++I) {
6569 if (CombinedMask2[I] != UndefMaskElem) {
6570 assert(CombinedMask1[I] == UndefMaskElem &&
6571 "Expected undefined mask element");
6572 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF);
6573 }
6574 }
6575 return Builder.createShuffleVector(
6576 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2,
6577 CombinedMask1);
6578 }
6579 if (isa<PoisonValue>(V1))
6580 return PoisonValue::get(FixedVectorType::get(
6581 cast<VectorType>(V1->getType())->getElementType(), Mask.size()));
6582 SmallVector<int> NewMask(Mask.begin(), Mask.end());
6583 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true);
6584 assert(V1 && "Expected non-null value after looking through shuffles.");
6585
6586 if (!IsIdentity)
6587 return Builder.createShuffleVector(V1, NewMask);
6588 return V1;
6589 }
6590};
6591 } // namespace
6592
6593 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
6594 ArrayRef<Value *> VectorizedVals) {
6595 ArrayRef<Value *> VL = E->Scalars;
6596
6597 Type *ScalarTy = VL[0]->getType();
6598 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
6599 ScalarTy = SI->getValueOperand()->getType();
6600 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
6601 ScalarTy = CI->getOperand(0)->getType();
6602 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
6603 ScalarTy = IE->getOperand(1)->getType();
6604 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
6605 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6606
6607 // If we have computed a smaller type for the expression, update VecTy so
6608 // that the costs will be accurate.
6609 if (MinBWs.count(VL[0]))
6610 VecTy = FixedVectorType::get(
6611 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
6612 unsigned EntryVF = E->getVectorFactor();
6613 auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF);
6614
6615 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
6616 // FIXME: it tries to fix a problem with MSVC buildbots.
6617 TargetTransformInfo *TTI = this->TTI;
6618 auto AdjustExtractsCost = [=](InstructionCost &Cost) {
6619 // If the resulting type is scalarized, do not adjust the cost.
6620 unsigned VecNumParts = TTI->getNumberOfParts(VecTy);
6621 if (VecNumParts == VecTy->getNumElements())
6622 return;
6623 DenseMap<Value *, int> ExtractVectorsTys;
6624 SmallPtrSet<Value *, 4> CheckedExtracts;
6625 for (auto *V : VL) {
6626 if (isa<UndefValue>(V))
6627 continue;
6628 // If all users of instruction are going to be vectorized and this
6629 // instruction itself is not going to be vectorized, consider this
6630 // instruction as dead and remove its cost from the final cost of the
6631 // vectorized tree.
6632 // Also, avoid adjusting the cost for extractelements with multiple uses
6633 // in different graph entries.
6634 const TreeEntry *VE = getTreeEntry(V);
6635 if (!CheckedExtracts.insert(V).second ||
6636 !areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) ||
6637 (VE && VE != E))
6638 continue;
6639 auto *EE = cast<ExtractElementInst>(V);
6640 std::optional<unsigned> EEIdx = getExtractIndex(EE);
6641 if (!EEIdx)
6642 continue;
6643 unsigned Idx = *EEIdx;
6644 if (VecNumParts != TTI->getNumberOfParts(EE->getVectorOperandType())) {
6645 auto It =
6646 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
6647 It->getSecond() = std::min<int>(It->second, Idx);
6648 }
6649 // Take credit for instruction that will become dead.
6650 if (EE->hasOneUse()) {
6651 Instruction *Ext = EE->user_back();
6652 if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) {
6653 return isa<GetElementPtrInst>(U);
6654 })) {
6655 // Use getExtractWithExtendCost() to calculate the cost of
6656 // extractelement/ext pair.
6657 Cost -=
6658 TTI->getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
6659 EE->getVectorOperandType(), Idx);
6660 // Add back the cost of s|zext which is subtracted separately.
6661 Cost += TTI->getCastInstrCost(
6662 Ext->getOpcode(), Ext->getType(), EE->getType(),
6663 TTI::getCastContextHint(Ext), CostKind, Ext);
6664 continue;
6665 }
6666 }
6667 Cost -= TTI->getVectorInstrCost(*EE, EE->getVectorOperandType(), CostKind,
6668 Idx);
6669 }
6670 // Add a cost for subvector extracts/inserts if required.
6671 for (const auto &Data : ExtractVectorsTys) {
6672 auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
6673 unsigned NumElts = VecTy->getNumElements();
6674 if (Data.second % NumElts == 0)
6675 continue;
6676 if (TTI->getNumberOfParts(EEVTy) > VecNumParts) {
6677 unsigned Idx = (Data.second / NumElts) * NumElts;
6678 unsigned EENumElts = EEVTy->getNumElements();
6679 if (Idx + NumElts <= EENumElts) {
6680 Cost +=
6681 TTI->getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6682 EEVTy, std::nullopt, CostKind, Idx, VecTy);
6683 } else {
6684 // Need to round up the subvector type vectorization factor to avoid a
6685 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT
6686 // <= EENumElts.
6687 auto *SubVT =
6688 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
6689 Cost +=
6690 TTI->getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6691 EEVTy, std::nullopt, CostKind, Idx, SubVT);
6692 }
6693 } else {
6694 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
6695 VecTy, std::nullopt, CostKind, 0, EEVTy);
6696 }
6697 }
6698 };
6699 if (E->State == TreeEntry::NeedToGather) {
6700 if (allConstant(VL))
6701 return 0;
6702 if (isa<InsertElementInst>(VL[0]))
6703 return InstructionCost::getInvalid();
6704 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
6705 // Build a mask out of the reorder indices and reorder scalars per this
6706 // mask.
6707 SmallVector<int> ReorderMask;
6708 inversePermutation(E->ReorderIndices, ReorderMask);
6709 if (!ReorderMask.empty())
6710 reorderScalars(GatheredScalars, ReorderMask);
6711 SmallVector<int> Mask;
6712 std::optional<TargetTransformInfo::ShuffleKind> GatherShuffle;
6713 SmallVector<const TreeEntry *> Entries;
6714 // Do not try to look for reshuffled loads for gathered loads (they will be
6715 // handled later), for vectorized scalars, and for cases which are
6716 // definitely not profitable (splats and small gather nodes).
6717 if (E->getOpcode() != Instruction::Load || E->isAltShuffle() ||
6718 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) ||
6719 isSplat(E->Scalars) ||
6720 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2))
6721 GatherShuffle = isGatherShuffledEntry(E, GatheredScalars, Mask, Entries);
6722 if (GatherShuffle) {
6723 // Remove shuffled elements from list of gathers.
6724 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
6725 if (Mask[I] != UndefMaskElem)
6726 GatheredScalars[I] = PoisonValue::get(ScalarTy);
6727 }
6728 assert((Entries.size() == 1 || Entries.size() == 2) &&
6729 "Expected shuffle of 1 or 2 entries.");
6730 InstructionCost GatherCost = 0;
6731 int Limit = Mask.size() * 2;
6732 if (all_of(Mask, [=](int Idx) { return Idx < Limit; }) &&
6733 ShuffleVectorInst::isIdentityMask(Mask)) {
6734 // Perfect match in the graph, will reuse the previously vectorized
6735 // node. Cost is 0.
6736 LLVM_DEBUG(
6737 dbgs()
6738 << "SLP: perfect diamond match for gather bundle that starts with "
6739 << *VL.front() << ".\n");
6740 if (NeedToShuffleReuses)
6741 GatherCost =
6742 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
6743 FinalVecTy, E->ReuseShuffleIndices);
6744 } else {
6745 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
6746 << " entries for bundle that starts with "
6747 << *VL.front() << ".\n");
6748 // Detected that instead of gather we can emit a shuffle of single/two
6749 // previously vectorized nodes. Add the cost of the permutation rather
6750 // than gather.
6751 ::addMask(Mask, E->ReuseShuffleIndices);
6752 GatherCost = TTI->getShuffleCost(*GatherShuffle, FinalVecTy, Mask);
6753 }
6754 if (!all_of(GatheredScalars, UndefValue::classof))
6755 GatherCost += getGatherCost(GatheredScalars);
6756 return GatherCost;
6757 }
6758 if ((E->getOpcode() == Instruction::ExtractElement ||
6759 all_of(E->Scalars,
6760 [](Value *V) {
6761 return isa<ExtractElementInst, UndefValue>(V);
6762 })) &&
6763 allSameType(VL)) {
6764 // Check that gather of extractelements can be represented as just a
6765 // shuffle of a single/two vectors the scalars are extracted from.
6766 SmallVector<int> Mask;
6767 std::optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
6768 isFixedVectorShuffle(VL, Mask);
6769 if (ShuffleKind) {
6770 // Found a bunch of extractelement instructions that must be gathered
6771 // into a vector and can be represented as a permutation of elements in
6772 // a single input vector or in 2 input vectors.
6773 InstructionCost Cost =
6774 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
6775 AdjustExtractsCost(Cost);
6776 if (NeedToShuffleReuses)
6777 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
6778 FinalVecTy, E->ReuseShuffleIndices);
6779 return Cost;
6780 }
6781 }
6782 if (isSplat(VL)) {
6783 // Found a broadcast of a single scalar; calculate the cost as the cost
6784 // of the broadcast.
6785 assert(VecTy == FinalVecTy &&
6786 "No reused scalars expected for broadcast.");
6787 const auto *It =
6788 find_if(VL, [](Value *V) { return !isa<UndefValue>(V); });
6789 // If all values are undefs - consider cost free.
6790 if (It == VL.end())
6791 return TTI::TCC_Free;
6792 // Add broadcast for non-identity shuffle only.
6793 bool NeedShuffle =
6794 VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof);
6795 InstructionCost InsertCost =
6796 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind,
6797 /*Index=*/0, PoisonValue::get(VecTy), *It);
6798 return InsertCost + (NeedShuffle
6799 ? TTI->getShuffleCost(
6800 TargetTransformInfo::SK_Broadcast, VecTy,
6801 /*Mask=*/std::nullopt, CostKind,
6802 /*Index=*/0,
6803 /*SubTp=*/nullptr, /*Args=*/VL[0])
6804 : TTI::TCC_Free);
6805 }
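// A minimal sketch of the broadcast costing above, with hypothetical unit
// costs: the bundle pays one insertelement into lane 0, plus one SK_Broadcast
// shuffle unless it is literally <V, undef, undef, ...>.
long broadcastCost(long InsertCost, long ShuffleCost, bool NeedShuffle) {
  return InsertCost + (NeedShuffle ? ShuffleCost : 0); // TCC_Free == 0
}
// E.g. with both unit costs equal to 1: <V, undef, undef, undef> costs 1,
// while <V, V, V, V> costs 2.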
6806 InstructionCost ReuseShuffleCost = 0;
6807 if (NeedToShuffleReuses)
6808 ReuseShuffleCost = TTI->getShuffleCost(
6809 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
6810 // Improve gather cost for gather of loads, if we can group some of the
6811 // loads into vector loads.
6812 if (VL.size() > 2 && E->getOpcode() == Instruction::Load &&
6813 !E->isAltShuffle()) {
6814 BoUpSLP::ValueSet VectorizedLoads;
6815 unsigned StartIdx = 0;
6816 unsigned VF = VL.size() / 2;
6817 unsigned VectorizedCnt = 0;
6818 unsigned ScatterVectorizeCnt = 0;
6819 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType());
6820 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) {
6821 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End;
6822 Cnt += VF) {
6823 ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
6824 if (!VectorizedLoads.count(Slice.front()) &&
6825 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) {
6826 SmallVector<Value *> PointerOps;
6827 OrdersType CurrentOrder;
6828 LoadsState LS =
6829 canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, *SE, *LI,
6830 *TLI, CurrentOrder, PointerOps);
6831 switch (LS) {
6832 case LoadsState::Vectorize:
6833 case LoadsState::ScatterVectorize:
6834 // Mark the vectorized loads so that we don't vectorize them
6835 // again.
6836 if (LS == LoadsState::Vectorize)
6837 ++VectorizedCnt;
6838 else
6839 ++ScatterVectorizeCnt;
6840 VectorizedLoads.insert(Slice.begin(), Slice.end());
6841 // If we vectorized initial block, no need to try to vectorize it
6842 // again.
6843 if (Cnt == StartIdx)
6844 StartIdx += VF;
6845 break;
6846 case LoadsState::Gather:
6847 break;
6848 }
6849 }
6850 }
6851 // Check if the whole array was vectorized already - exit.
6852 if (StartIdx >= VL.size())
6853 break;
6854 // Found vectorizable parts - exit.
6855 if (!VectorizedLoads.empty())
6856 break;
6857 }
6858 if (!VectorizedLoads.empty()) {
6859 InstructionCost GatherCost = 0;
6860 unsigned NumParts = TTI->getNumberOfParts(VecTy);
6861 bool NeedInsertSubvectorAnalysis =
6862 !NumParts || (VL.size() / VF) > NumParts;
6863 // Get the cost for gathered loads.
6864 for (unsigned I = 0, End = VL.size(); I < End; I += VF) {
6865 if (VectorizedLoads.contains(VL[I]))
6866 continue;
6867 GatherCost += getGatherCost(VL.slice(I, VF));
6868 }
6869 // The cost for vectorized loads.
6870 InstructionCost ScalarsCost = 0;
6871 for (Value *V : VectorizedLoads) {
6872 auto *LI = cast<LoadInst>(V);
6873 ScalarsCost +=
6874 TTI->getMemoryOpCost(Instruction::Load, LI->getType(),
6875 LI->getAlign(), LI->getPointerAddressSpace(),
6876 CostKind, TTI::OperandValueInfo(), LI);
6877 }
6878 auto *LI = cast<LoadInst>(E->getMainOp());
6879 auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
6880 Align Alignment = LI->getAlign();
6881 GatherCost +=
6882 VectorizedCnt *
6883 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
6884 LI->getPointerAddressSpace(), CostKind,
6885 TTI::OperandValueInfo(), LI);
6886 GatherCost += ScatterVectorizeCnt *
6887 TTI->getGatherScatterOpCost(
6888 Instruction::Load, LoadTy, LI->getPointerOperand(),
6889 /*VariableMask=*/false, Alignment, CostKind, LI);
6890 if (NeedInsertSubvectorAnalysis) {
6891 // Add the cost for the subvectors insert.
6892 for (int I = VF, E = VL.size(); I < E; I += VF)
6893 GatherCost +=
6894 TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy,
6895 std::nullopt, CostKind, I, LoadTy);
6896 }
6897 return ReuseShuffleCost + GatherCost - ScalarsCost;
6898 }
6899 }
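// Worked example of the net cost returned above (all numbers hypothetical):
// suppose two of four scalar loads fold into one contiguous vector load and
// the remaining two must still be gathered.
//   ReuseShuffleCost = 0
//   GatherCost       = 1 (vector load) + 4 (gather of the remaining slice)
//   ScalarsCost      = 2 (the two scalar loads that disappear)
//   returned cost    = 0 + 5 - 2 = 3
// Subtracting ScalarsCost credits the removal of the now-dead scalar loads.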
6900 return ReuseShuffleCost + getGatherCost(VL);
6901 }
6902 InstructionCost CommonCost = 0;
6903 SmallVector<int> Mask;
6904 if (!E->ReorderIndices.empty()) {
6905 SmallVector<int> NewMask;
6906 if (E->getOpcode() == Instruction::Store) {
6907 // For stores the order is actually a mask.
6908 NewMask.resize(E->ReorderIndices.size());
6909 copy(E->ReorderIndices, NewMask.begin());
6910 } else {
6911 inversePermutation(E->ReorderIndices, NewMask);
6912 }
6913 ::addMask(Mask, NewMask);
6914 }
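// Standalone sketch of the assumed inversePermutation semantics used above:
// given ReorderIndices where Order[I] is the source lane placed at position I,
// the resulting mask satisfies Mask[Order[I]] = I.
#include <vector>
std::vector<int> inversePermSketch(const std::vector<unsigned> &Order) {
  std::vector<int> Mask(Order.size(), -1); // -1 ~ UndefMaskElem
  for (unsigned I = 0, E = (unsigned)Order.size(); I < E; ++I)
    Mask[Order[I]] = I;
  return Mask;
}
// E.g. Order = {2, 0, 1} gives Mask = {1, 2, 0}.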
6915 if (NeedToShuffleReuses)
6916 ::addMask(Mask, E->ReuseShuffleIndices);
6917 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
6918 CommonCost =
6919 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
6920   assert((E->State == TreeEntry::Vectorize ||
6921           E->State == TreeEntry::ScatterVectorize) &&
6922          "Unhandled state");
6923   assert(E->getOpcode() &&
6924          ((allSameType(VL) && allSameBlock(VL)) ||
6925           (E->getOpcode() == Instruction::GetElementPtr &&
6926            E->getMainOp()->getType()->isPointerTy())) &&
6927          "Invalid VL");
6928 Instruction *VL0 = E->getMainOp();
6929 unsigned ShuffleOrOp =
6930 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
6931 const unsigned Sz = VL.size();
6932 auto GetCostDiff =
6933 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost,
6934 function_ref<InstructionCost(InstructionCost)> VectorCost) {
6935 // Calculate the cost of this instruction.
6936 InstructionCost ScalarCost = 0;
6937 if (isa<CastInst, CmpInst, SelectInst, CallInst>(VL0)) {
6938 // For some of the instructions no need to calculate cost for each
6939 // particular instruction, we can use the cost of the single
6940 // instruction x total number of scalar instructions.
6941 ScalarCost = Sz * ScalarEltCost(0);
6942 } else {
6943 for (unsigned I = 0; I < Sz; ++I)
6944 ScalarCost += ScalarEltCost(I);
6945 }
6946
6947 InstructionCost VecCost = VectorCost(CommonCost);
6948         LLVM_DEBUG(
6949             dumpTreeCosts(E, CommonCost, VecCost - CommonCost, ScalarCost));
6950         // Disable warnings that `this` and `E` are unused. Required for
6951         // `dumpTreeCosts`.
6952 (void)this;
6953 (void)E;
6954 return VecCost - ScalarCost;
6955 };
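// The same cost-diff pattern as a self-contained sketch: sum the per-lane
// scalar costs, then subtract them from the vector cost (which already folds
// in CommonCost for any reshuffling). A negative result means vectorizing
// wins. Hypothetical helper, mirroring GetCostDiff above.
#include <functional>
long costDiff(unsigned Lanes,
              const std::function<long(unsigned)> &ScalarEltCost,
              const std::function<long(long)> &VectorCost, long CommonCost) {
  long ScalarCost = 0;
  for (unsigned I = 0; I < Lanes; ++I)
    ScalarCost += ScalarEltCost(I);
  return VectorCost(CommonCost) - ScalarCost;
}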
6956 // Calculate cost difference from vectorizing set of GEPs.
6957 // Negative value means vectorizing is profitable.
6958 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) {
6959 InstructionCost CostSavings = 0;
6960 for (Value *V : Ptrs) {
6961 if (V == BasePtr)
6962 continue;
6963 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
6964       // GEPs may be just addresses without instructions and are considered free.
6965       // GEPs with all-constant indices are also considered to have zero cost.
6966 if (!Ptr || Ptr->hasAllConstantIndices())
6967 continue;
6968
6969       // Here we differentiate two cases: when the GEPs represent a regular
6970       // vectorization tree node (and hence are vectorized) and when the set is
6971       // the arguments of a set of loads or stores being vectorized. In the
6972       // former case all the scalar GEPs will be removed as a result of
6973       // vectorization. For any external uses of some lanes, extractelement
6974       // instructions will be generated (whose cost is estimated separately).
6975       // In the latter case, since the set of GEPs itself is not vectorized,
6976       // those used more than once will remain in the vectorized code as well,
6977       // so we should not count them as savings.
6978 if (!Ptr->hasOneUse() && isa<LoadInst, StoreInst>(VL0))
6979 continue;
6980
6981 // TODO: it is target dependent, so need to implement and then use a TTI
6982 // interface.
6983 CostSavings += TTI->getArithmeticInstrCost(Instruction::Add,
6984 Ptr->getType(), CostKind);
6985 }
6986 LLVM_DEBUG(dbgs() << "SLP: Calculated GEPs cost savings or Tree:\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: Calculated GEPs cost savings or Tree:\n"
; E->dump(); } } while (false)
6987 E->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: Calculated GEPs cost savings or Tree:\n"
; E->dump(); } } while (false)
;
6988 LLVM_DEBUG(dbgs() << "SLP: GEP cost saving = " << CostSavings << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: GEP cost saving = " <<
CostSavings << "\n"; } } while (false)
;
6989 return InstructionCost() - CostSavings;
6990 };
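// Hypothetical IR illustrating the savings rule above: only GEPs that are real
// instructions with at least one non-constant index (and, for load/store
// bundles, a single use) contribute an Add-equivalent saving.
//   %g = getelementptr inbounds i32, ptr %base, i64 %i   ; counts as a saving
//   %h = getelementptr inbounds i32, ptr %base, i64 4    ; all-constant: free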
6991
6992 switch (ShuffleOrOp) {
6993 case Instruction::PHI: {
6994 // Count reused scalars.
6995 InstructionCost ScalarCost = 0;
6996 SmallPtrSet<const TreeEntry *, 4> CountedOps;
6997 for (Value *V : VL) {
6998 auto *PHI = dyn_cast<PHINode>(V);
6999 if (!PHI)
7000 continue;
7001
7002 ValueList Operands(PHI->getNumIncomingValues(), nullptr);
7003 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) {
7004 Value *Op = PHI->getIncomingValue(I);
7005 Operands[I] = Op;
7006 }
7007 if (const TreeEntry *OpTE = getTreeEntry(Operands.front()))
7008 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second)
7009 if (!OpTE->ReuseShuffleIndices.empty())
7010 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() -
7011 OpTE->Scalars.size());
7012 }
7013
7014 return CommonCost - ScalarCost;
7015 }
7016 case Instruction::ExtractValue:
7017 case Instruction::ExtractElement: {
7018 auto GetScalarCost = [=](unsigned Idx) {
7019 auto *I = cast<Instruction>(VL[Idx]);
7020 VectorType *SrcVecTy;
7021 if (ShuffleOrOp == Instruction::ExtractElement) {
7022 auto *EE = cast<ExtractElementInst>(I);
7023 SrcVecTy = EE->getVectorOperandType();
7024 } else {
7025 auto *EV = cast<ExtractValueInst>(I);
7026 Type *AggregateTy = EV->getAggregateOperand()->getType();
7027 unsigned NumElts;
7028 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy))
7029 NumElts = ATy->getNumElements();
7030 else
7031 NumElts = AggregateTy->getStructNumElements();
7032 SrcVecTy = FixedVectorType::get(ScalarTy, NumElts);
7033 }
7034 if (I->hasOneUse()) {
7035 Instruction *Ext = I->user_back();
7036 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
7037 all_of(Ext->users(),
7038 [](User *U) { return isa<GetElementPtrInst>(U); })) {
7039 // Use getExtractWithExtendCost() to calculate the cost of
7040 // extractelement/ext pair.
7041 InstructionCost Cost = TTI->getExtractWithExtendCost(
7042 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I));
7043 // Subtract the cost of s|zext which is subtracted separately.
7044 Cost -= TTI->getCastInstrCost(
7045 Ext->getOpcode(), Ext->getType(), I->getType(),
7046 TTI::getCastContextHint(Ext), CostKind, Ext);
7047 return Cost;
7048 }
7049 }
7050 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy,
7051 CostKind, *getExtractIndex(I));
7052 };
7053 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; };
7054 return GetCostDiff(GetScalarCost, GetVectorCost);
7055 }
7056 case Instruction::InsertElement: {
7057     assert(E->ReuseShuffleIndices.empty() &&
7058            "Unique insertelements only are expected.");
7059 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
7060 unsigned const NumElts = SrcVecTy->getNumElements();
7061 unsigned const NumScalars = VL.size();
7062
7063 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
7064
7065 SmallVector<int> InsertMask(NumElts, UndefMaskElem);
7066 unsigned OffsetBeg = *getInsertIndex(VL.front());
7067 unsigned OffsetEnd = OffsetBeg;
7068 InsertMask[OffsetBeg] = 0;
7069 for (auto [I, V] : enumerate(VL.drop_front())) {
7070 unsigned Idx = *getInsertIndex(V);
7071 if (OffsetBeg > Idx)
7072 OffsetBeg = Idx;
7073 else if (OffsetEnd < Idx)
7074 OffsetEnd = Idx;
7075 InsertMask[Idx] = I + 1;
7076 }
7077 unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
7078 if (NumOfParts > 0)
7079 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
7080 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
7081 VecScalarsSz;
7082 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz);
7083 unsigned InsertVecSz = std::min<unsigned>(
7084 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1),
7085 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz);
7086 bool IsWholeSubvector =
7087 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0);
7088 // Check if we can safely insert a subvector. If it is not possible, just
7089 // generate a whole-sized vector and shuffle the source vector and the new
7090 // subvector.
7091 if (OffsetBeg + InsertVecSz > VecSz) {
7092 // Align OffsetBeg to generate correct mask.
7093 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset);
7094 InsertVecSz = VecSz;
7095 }
7096
7097 APInt DemandedElts = APInt::getZero(NumElts);
7098 // TODO: Add support for Instruction::InsertValue.
7099 SmallVector<int> Mask;
7100 if (!E->ReorderIndices.empty()) {
7101 inversePermutation(E->ReorderIndices, Mask);
7102 Mask.append(InsertVecSz - Mask.size(), UndefMaskElem);
7103 } else {
7104 Mask.assign(VecSz, UndefMaskElem);
7105 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0);
7106 }
7107 bool IsIdentity = true;
7108 SmallVector<int> PrevMask(InsertVecSz, UndefMaskElem);
7109 Mask.swap(PrevMask);
7110 for (unsigned I = 0; I < NumScalars; ++I) {
7111 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]);
7112 DemandedElts.setBit(InsertIdx);
7113 IsIdentity &= InsertIdx - OffsetBeg == I;
7114 Mask[InsertIdx - OffsetBeg] = I;
7115 }
7116     assert(Offset < NumElts && "Failed to find vector index offset");
7117
7118 InstructionCost Cost = 0;
7119 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
7120 /*Insert*/ true, /*Extract*/ false,
7121 CostKind);
7122
7123 // First cost - resize to actual vector size if not identity shuffle or
7124 // need to shift the vector.
7125 // Do not calculate the cost if the actual size is the register size and
7126 // we can merge this shuffle with the following SK_Select.
7127 auto *InsertVecTy =
7128 FixedVectorType::get(SrcVecTy->getElementType(), InsertVecSz);
7129 if (!IsIdentity)
7130 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
7131 InsertVecTy, Mask);
7132 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
7133 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
7134 }));
7135 // Second cost - permutation with subvector, if some elements are from the
7136 // initial vector or inserting a subvector.
7137 // TODO: Implement the analysis of the FirstInsert->getOperand(0)
7138 // subvector of ActualVecTy.
7139 SmallBitVector InMask =
7140 isUndefVector(FirstInsert->getOperand(0),
7141 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask));
7142 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) {
7143 if (InsertVecSz != VecSz) {
7144 auto *ActualVecTy =
7145 FixedVectorType::get(SrcVecTy->getElementType(), VecSz);
7146 Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy,
7147 std::nullopt, CostKind, OffsetBeg - Offset,
7148 InsertVecTy);
7149 } else {
7150 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
7151 Mask[I] = InMask.test(I) ? UndefMaskElem : I;
7152 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset;
7153 I <= End; ++I)
7154 if (Mask[I] != UndefMaskElem)
7155 Mask[I] = I + VecSz;
7156 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I)
7157 Mask[I] =
7158 ((I >= InMask.size()) || InMask.test(I)) ? UndefMaskElem : I;
7159 Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask);
7160 }
7161 }
7162 return Cost;
7163 }
7164 case Instruction::ZExt:
7165 case Instruction::SExt:
7166 case Instruction::FPToUI:
7167 case Instruction::FPToSI:
7168 case Instruction::FPExt:
7169 case Instruction::PtrToInt:
7170 case Instruction::IntToPtr:
7171 case Instruction::SIToFP:
7172 case Instruction::UIToFP:
7173 case Instruction::Trunc:
7174 case Instruction::FPTrunc:
7175 case Instruction::BitCast: {
7176 auto GetScalarCost = [=](unsigned Idx) {
7177 auto *VI = cast<Instruction>(VL[Idx]);
7178 return TTI->getCastInstrCost(E->getOpcode(), ScalarTy,
7179 VI->getOperand(0)->getType(),
7180 TTI::getCastContextHint(VI), CostKind, VI);
7181 };
7182 auto GetVectorCost = [=](InstructionCost CommonCost) {
7183 Type *SrcTy = VL0->getOperand(0)->getType();
7184 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
7185 InstructionCost VecCost = CommonCost;
7186 // Check if the values are candidates to demote.
7187 if (!MinBWs.count(VL0) || VecTy != SrcVecTy)
7188 VecCost +=
7189 TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy,
7190 TTI::getCastContextHint(VL0), CostKind, VL0);
7191 return VecCost;
7192 };
7193 return GetCostDiff(GetScalarCost, GetVectorCost);
7194 }
7195 case Instruction::FCmp:
7196 case Instruction::ICmp:
7197 case Instruction::Select: {
7198 CmpInst::Predicate VecPred, SwappedVecPred;
7199 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value());
7200 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) ||
7201 match(VL0, MatchCmp))
7202 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred);
7203 else
7204 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy()
7205 ? CmpInst::BAD_FCMP_PREDICATE
7206 : CmpInst::BAD_ICMP_PREDICATE;
7207 auto GetScalarCost = [&](unsigned Idx) {
7208 auto *VI = cast<Instruction>(VL[Idx]);
7209 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy()
7210 ? CmpInst::BAD_FCMP_PREDICATE
7211 : CmpInst::BAD_ICMP_PREDICATE;
7212 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
7213 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) &&
7214 !match(VI, MatchCmp)) ||
7215 (CurrentPred != VecPred && CurrentPred != SwappedVecPred))
7216 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy()
7217 ? CmpInst::BAD_FCMP_PREDICATE
7218 : CmpInst::BAD_ICMP_PREDICATE;
7219
7220 return TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
7221 Builder.getInt1Ty(), CurrentPred, CostKind,
7222 VI);
7223 };
7224 auto GetVectorCost = [&](InstructionCost CommonCost) {
7225 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
7226
7227 InstructionCost VecCost = TTI->getCmpSelInstrCost(
7228 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
7229 // Check if it is possible and profitable to use min/max for selects
7230 // in VL.
7231 //
7232 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
7233 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
7234 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
7235 {VecTy, VecTy});
7236 InstructionCost IntrinsicCost =
7237 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
7238 // If the selects are the only uses of the compares, they will be
7239 // dead and we can adjust the cost by removing their cost.
7240 if (IntrinsicAndUse.second)
7241 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy,
7242 MaskTy, VecPred, CostKind);
7243 VecCost = std::min(VecCost, IntrinsicCost);
7244 }
7245 return VecCost + CommonCost;
7246 };
7247 return GetCostDiff(GetScalarCost, GetVectorCost);
7248 }
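// Illustrative IR shape behind the min/max conversion checked above (the
// exact rules live in canConvertToMinOrMaxIntrinsic, assumed from context):
// a compare whose only users are matching selects, e.g.
//   %c = icmp slt i32 %a, %b
//   %m = select i1 %c, i32 %a, i32 %b
// can be costed as a single @llvm.smin intrinsic when vectorized, and the
// now-dead compare's vector cost is refunded.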
7249 case Instruction::FNeg:
7250 case Instruction::Add:
7251 case Instruction::FAdd:
7252 case Instruction::Sub:
7253 case Instruction::FSub:
7254 case Instruction::Mul:
7255 case Instruction::FMul:
7256 case Instruction::UDiv:
7257 case Instruction::SDiv:
7258 case Instruction::FDiv:
7259 case Instruction::URem:
7260 case Instruction::SRem:
7261 case Instruction::FRem:
7262 case Instruction::Shl:
7263 case Instruction::LShr:
7264 case Instruction::AShr:
7265 case Instruction::And:
7266 case Instruction::Or:
7267 case Instruction::Xor: {
7268 auto GetScalarCost = [=](unsigned Idx) {
7269 auto *VI = cast<Instruction>(VL[Idx]);
7270 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1;
7271 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0));
7272 TTI::OperandValueInfo Op2Info =
7273 TTI::getOperandInfo(VI->getOperand(OpIdx));
7274 SmallVector<const Value *> Operands(VI->operand_values());
7275 return TTI->getArithmeticInstrCost(ShuffleOrOp, ScalarTy, CostKind,
7276 Op1Info, Op2Info, Operands, VI);
7277 };
7278 auto GetVectorCost = [=](InstructionCost CommonCost) {
7279 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1;
7280 TTI::OperandValueInfo Op1Info = getOperandInfo(VL, 0);
7281 TTI::OperandValueInfo Op2Info = getOperandInfo(VL, OpIdx);
7282 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info,
7283 Op2Info) +
7284 CommonCost;
7285 };
7286 return GetCostDiff(GetScalarCost, GetVectorCost);
7287 }
7288 case Instruction::GetElementPtr: {
7289 return CommonCost + GetGEPCostDiff(VL, VL0);
7290 }
7291 case Instruction::Load: {
7292 auto GetScalarCost = [=](unsigned Idx) {
7293 auto *VI = cast<LoadInst>(VL[Idx]);
7294 return TTI->getMemoryOpCost(Instruction::Load, ScalarTy, VI->getAlign(),
7295 VI->getPointerAddressSpace(), CostKind,
7296 TTI::OperandValueInfo(), VI);
7297 };
7298 auto *LI0 = cast<LoadInst>(VL0);
7299 auto GetVectorCost = [=](InstructionCost CommonCost) {
7300 InstructionCost VecLdCost;
7301 if (E->State == TreeEntry::Vectorize) {
7302 VecLdCost = TTI->getMemoryOpCost(
7303 Instruction::Load, VecTy, LI0->getAlign(),
7304 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo());
7305 } else {
7306         assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
7307 Align CommonAlignment = LI0->getAlign();
7308 for (Value *V : VL)
7309 CommonAlignment =
7310 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
7311 VecLdCost = TTI->getGatherScatterOpCost(
7312 Instruction::Load, VecTy, LI0->getPointerOperand(),
7313 /*VariableMask=*/false, CommonAlignment, CostKind);
7314 }
7315 return VecLdCost + CommonCost;
7316 };
7317
7318 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost);
7319     // If this node generates a masked gather load then it is not a terminal
7320     // node. Hence the address operand cost is estimated separately.
7321 if (E->State == TreeEntry::ScatterVectorize)
7322 return Cost;
7323
7324 // Estimate cost of GEPs since this tree node is a terminator.
7325 SmallVector<Value *> PointerOps(VL.size());
7326 for (auto [I, V] : enumerate(VL))
7327 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand();
7328 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand());
7329 }
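// Minimal sketch of the conservative alignment choice in the scatter path
// above: a masked gather must assume the weakest alignment among its member
// loads. Hypothetical standalone helper.
#include <algorithm>
#include <vector>
unsigned commonAlignment(const std::vector<unsigned> &MemberAligns) {
  unsigned Align = MemberAligns.front();
  for (unsigned A : MemberAligns)
    Align = std::min(Align, A);
  return Align; // e.g. {8, 4, 16, 4} -> 4
}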
7330 case Instruction::Store: {
7331 bool IsReorder = !E->ReorderIndices.empty();
7332 auto GetScalarCost = [=](unsigned Idx) {
7333 auto *VI = cast<StoreInst>(VL[Idx]);
7334 TTI::OperandValueInfo OpInfo = getOperandInfo(VI, 0);
7335 return TTI->getMemoryOpCost(Instruction::Store, ScalarTy, VI->getAlign(),
7336 VI->getPointerAddressSpace(), CostKind,
7337 OpInfo, VI);
7338 };
7339 auto *BaseSI =
7340 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
7341 auto GetVectorCost = [=](InstructionCost CommonCost) {
7342 // We know that we can merge the stores. Calculate the cost.
7343 TTI::OperandValueInfo OpInfo = getOperandInfo(VL, 0);
7344 return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(),
7345 BaseSI->getPointerAddressSpace(), CostKind,
7346 OpInfo) +
7347 CommonCost;
7348 };
7349 SmallVector<Value *> PointerOps(VL.size());
7350 for (auto [I, V] : enumerate(VL)) {
7351 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I;
7352 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand();
7353 }
7354
7355 return GetCostDiff(GetScalarCost, GetVectorCost) +
7356 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand());
7357 }
7358 case Instruction::Call: {
7359 auto GetScalarCost = [=](unsigned Idx) {
7360 auto *CI = cast<CallInst>(VL[Idx]);
7361 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7362 if (ID != Intrinsic::not_intrinsic) {
7363 IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
7364 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
7365 }
7366 return TTI->getCallInstrCost(CI->getCalledFunction(),
7367 CI->getFunctionType()->getReturnType(),
7368 CI->getFunctionType()->params(), CostKind);
7369 };
7370 auto GetVectorCost = [=](InstructionCost CommonCost) {
7371 auto *CI = cast<CallInst>(VL0);
7372 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
7373 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost;
7374 };
7375 return GetCostDiff(GetScalarCost, GetVectorCost);
7376 }
7377 case Instruction::ShuffleVector: {
7378     assert(E->isAltShuffle() &&
7379            ((Instruction::isBinaryOp(E->getOpcode()) &&
7380              Instruction::isBinaryOp(E->getAltOpcode())) ||
7381             (Instruction::isCast(E->getOpcode()) &&
7382              Instruction::isCast(E->getAltOpcode())) ||
7383             (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
7384            "Invalid Shuffle Vector Operand");
7385 // Try to find the previous shuffle node with the same operands and same
7386 // main/alternate ops.
7387 auto TryFindNodeWithEqualOperands = [=]() {
7388 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
7389 if (TE.get() == E)
7390 break;
7391 if (TE->isAltShuffle() &&
7392 ((TE->getOpcode() == E->getOpcode() &&
7393 TE->getAltOpcode() == E->getAltOpcode()) ||
7394 (TE->getOpcode() == E->getAltOpcode() &&
7395 TE->getAltOpcode() == E->getOpcode())) &&
7396 TE->hasEqualOperands(*E))
7397 return true;
7398 }
7399 return false;
7400 };
7401 auto GetScalarCost = [=](unsigned Idx) {
7402 auto *VI = cast<Instruction>(VL[Idx]);
7403       assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode");
7404 (void)E;
7405 return TTI->getInstructionCost(VI, CostKind);
7406 };
7407     // Need to clear CommonCost since the final shuffle cost is included in
7408     // the vector cost.
7409 auto GetVectorCost = [&](InstructionCost) {
7410 // VecCost is equal to sum of the cost of creating 2 vectors
7411 // and the cost of creating shuffle.
7412 InstructionCost VecCost = 0;
7413 if (TryFindNodeWithEqualOperands()) {
7414         LLVM_DEBUG({
7415           dbgs() << "SLP: diamond match for alternate node found.\n";
7416           E->dump();
7417         });
7418         // No need to add new vector costs here since we're going to reuse the
7419         // same main/alternate vector ops and just do different shuffling.
7420 } else if (Instruction::isBinaryOp(E->getOpcode())) {
7421 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
7422 VecCost +=
7423 TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind);
7424 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
7425 VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
7426 Builder.getInt1Ty(),
7427 CI0->getPredicate(), CostKind, VL0);
7428 VecCost += TTI->getCmpSelInstrCost(
7429 E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
7430 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
7431 E->getAltOp());
7432 } else {
7433 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
7434 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
7435 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
7436 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
7437 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
7438 TTI::CastContextHint::None, CostKind);
7439 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
7440 TTI::CastContextHint::None, CostKind);
7441 }
7442 if (E->ReuseShuffleIndices.empty()) {
7443 VecCost +=
7444 TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy);
7445 } else {
7446 SmallVector<int> Mask;
7447 buildShuffleEntryMask(
7448 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
7449 [E](Instruction *I) {
7450               assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
7451 return I->getOpcode() == E->getAltOpcode();
7452 },
7453 Mask);
7454 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc,
7455 FinalVecTy, Mask);
7456 }
7457 return VecCost;
7458 };
7459 return GetCostDiff(GetScalarCost, GetVectorCost);
7460 }
7461 default:
7462 llvm_unreachable("Unknown instruction")::llvm::llvm_unreachable_internal("Unknown instruction", "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp"
, 7462)
;
7463 }
7464}
7465
7466bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
7467 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: Check whether the tree with height "
<< VectorizableTree.size() << " is fully vectorizable .\n"
; } } while (false)
7468 << VectorizableTree.size() << " is fully vectorizable .\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: Check whether the tree with height "
<< VectorizableTree.size() << " is fully vectorizable .\n"
; } } while (false)
;
7469
7470 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
7471 SmallVector<int> Mask;
7472 return TE->State == TreeEntry::NeedToGather &&
7473 !any_of(TE->Scalars,
7474 [this](Value *V) { return EphValues.contains(V); }) &&
7475 (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
7476 TE->Scalars.size() < Limit ||
7477 ((TE->getOpcode() == Instruction::ExtractElement ||
7478 all_of(TE->Scalars,
7479 [](Value *V) {
7480 return isa<ExtractElementInst, UndefValue>(V);
7481 })) &&
7482 isFixedVectorShuffle(TE->Scalars, Mask)) ||
7483 (TE->State == TreeEntry::NeedToGather &&
7484 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
7485 };
7486
7487 // We only handle trees of heights 1 and 2.
7488 if (VectorizableTree.size() == 1 &&
7489 (VectorizableTree[0]->State == TreeEntry::Vectorize ||
7490 (ForReduction &&
7491 AreVectorizableGathers(VectorizableTree[0].get(),
7492 VectorizableTree[0]->Scalars.size()) &&
7493 VectorizableTree[0]->getVectorFactor() > 2)))
7494 return true;
7495
7496 if (VectorizableTree.size() != 2)
7497 return false;
7498
7499   // Handle splat and all-constant stores. Also try to vectorize tiny trees
7500   // whose second node is a gather with fewer scalar operands than the initial
7501   // tree element (it may be profitable to shuffle the second gather), or
7502   // whose scalars are extractelements that form a shuffle.
7503 SmallVector<int> Mask;
7504 if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
7505 AreVectorizableGathers(VectorizableTree[1].get(),
7506 VectorizableTree[0]->Scalars.size()))
7507 return true;
7508
7509 // Gathering cost would be too much for tiny trees.
7510 if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
7511 (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
7512 VectorizableTree[0]->State != TreeEntry::ScatterVectorize))
7513 return false;
7514
7515 return true;
7516}
7517
7518static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
7519 TargetTransformInfo *TTI,
7520 bool MustMatchOrInst) {
7521 // Look past the root to find a source value. Arbitrarily follow the
7522 // path through operand 0 of any 'or'. Also, peek through optional
7523 // shift-left-by-multiple-of-8-bits.
7524 Value *ZextLoad = Root;
7525 const APInt *ShAmtC;
7526 bool FoundOr = false;
7527 while (!isa<ConstantExpr>(ZextLoad) &&
7528 (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
7529 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
7530 ShAmtC->urem(8) == 0))) {
7531 auto *BinOp = cast<BinaryOperator>(ZextLoad);
7532 ZextLoad = BinOp->getOperand(0);
7533 if (BinOp->getOpcode() == Instruction::Or)
7534 FoundOr = true;
7535 }
7536 // Check if the input is an extended load of the required or/shift expression.
7537 Value *Load;
7538 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
7539 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
7540 return false;
7541
7542 // Require that the total load bit width is a legal integer type.
7543 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
7544 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
7545 Type *SrcTy = Load->getType();
7546 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
7547 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
7548 return false;
7549
7550 // Everything matched - assume that we can fold the whole sequence using
7551 // load combining.
7552 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: Assume load combining for tree starting at "
<< *(cast<Instruction>(Root)) << "\n"; } }
while (false)
7553 << *(cast<Instruction>(Root)) << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("SLP")) { dbgs() << "SLP: Assume load combining for tree starting at "
<< *(cast<Instruction>(Root)) << "\n"; } }
while (false)
;
7554
7555 return true;
7556}
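// Hypothetical source-level pattern this predicate accepts: byte loads
// zero-extended and OR'ed together at shift amounts that are multiples of 8,
// which the backend can usually fold into a single wide load.
//   uint32_t v = (uint32_t)p[0]
//              | ((uint32_t)p[1] << 8)
//              | ((uint32_t)p[2] << 16)
//              | ((uint32_t)p[3] << 24);
// Here NumElts * 8 = 32 bits, and i32 is legal on typical targets, so the
// sequence is treated as a load-combine candidate.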
7557
7558bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
7559 if (RdxKind != RecurKind::Or)
7560 return false;
7561
7562 unsigned NumElts = VectorizableTree[0]->Scalars.size();
7563 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
7564 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
7565 /* MatchOr */ false);
7566}
7567
7568bool BoUpSLP::isLoadCombineCandidate() const {
7569 // Peek through a final sequence of stores and check if all operations are
7570 // likely to be load-combined.
7571 unsigned NumElts = VectorizableTree[0]->Scalars.size();
7572 for (Value *Scalar : VectorizableTree[0]->Scalars) {
7573 Value *X;
7574 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
7575 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
7576 return false;
7577 }
7578 return true;
7579}
7580
7581bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
7582 // No need to vectorize inserts of gathered values.
7583 if (VectorizableTree.size() == 2 &&
7584 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
7585 VectorizableTree[1]->State == TreeEntry::NeedToGather &&
7586 (VectorizableTree[1]->getVectorFactor() <= 2 ||
7587 !(isSplat(VectorizableTree[1]->Scalars) ||
7588 allConstant(VectorizableTree[1]->Scalars))))
7589 return true;
7590
7591 // We can vectorize the tree if its size is greater than or equal to the
7592 // minimum size specified by the MinTreeSize command line option.
7593 if (VectorizableTree.size() >= MinTreeSize)
7594 return false;
7595
7596 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
7597 // can vectorize it if we can prove it fully vectorizable.
7598 if (isFullyVectorizableTinyTree(ForReduction))
7599 return false;
7600
7601   assert(VectorizableTree.empty()
7602              ? ExternalUses.empty()
7603              : true && "We shouldn't have any external users");
7604
7605 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
7606 // vectorizable.
7607 return true;
7608}
7609
7610InstructionCost BoUpSLP::getSpillCost() const {
7611 // Walk from the bottom of the tree to the top, tracking which values are
7612 // live. When we see a call instruction that is not part of our tree,
7613 // query TTI to see if there is a cost to keeping values live over it
7614 // (for example, if spills and fills are required).
7615 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
7616 InstructionCost Cost = 0;
7617
7618 SmallPtrSet<Instruction*, 4> LiveValues;
7619 Instruction *PrevInst = nullptr;
7620
7621 // The entries in VectorizableTree are not necessarily ordered by their
7622 // position in basic blocks. Collect them and order them by dominance so later
7623 // instructions are guaranteed to be visited first. For instructions in
7624 // different basic blocks, we only scan to the beginning of the block, so
7625 // their order does not matter, as long as all instructions in a basic block
7626 // are grouped together. Using dominance ensures a deterministic order.
7627 SmallVector<Instruction *, 16> OrderedScalars;
7628 for (const auto &TEPtr : VectorizableTree) {
7629 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
7630 if (!Inst)
7631 continue;
7632 OrderedScalars.push_back(Inst);
7633 }
7634 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
7635 auto *NodeA = DT->getNode(A->getParent());
7636 auto *NodeB = DT->getNode(B->getParent());
7637     assert(NodeA && "Should only process reachable instructions");
7638     assert(NodeB && "Should only process reachable instructions");
7639     assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
7640            "Different nodes should have different DFS numbers");
7641 if (NodeA != NodeB)
7642 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
7643 return B->comesBefore(A);
7644 });
7645
7646 for (Instruction *Inst : OrderedScalars) {
7647 if (!PrevInst) {
7648 PrevInst = Inst;
7649 continue;
7650 }
7651
7652 // Update LiveValues.
7653 LiveValues.erase(PrevInst);
7654 for (auto &J : PrevInst->operands()) {
7655 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
7656 LiveValues.insert(cast<Instruction>(&*J));
7657 }
7658
7659     LLVM_DEBUG({
7660       dbgs() << "SLP: #LV: " << LiveValues.size();
7661       for (auto *X : LiveValues)
7662         dbgs() << " " << X->getName();
7663       dbgs() << ", Looking at ";
7664       Inst->dump();
7665     });
7666
7667 // Now find the sequence of instructions between PrevInst and Inst.
7668 unsigned NumCalls = 0;
7669 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
7670 PrevInstIt =
7671 PrevInst->getIterator().getReverse();
7672 while (InstIt != PrevInstIt) {
7673 if (PrevInstIt == PrevInst->getParent()->rend()) {
7674 PrevInstIt = Inst->getParent()->rbegin();
7675 continue;
7676 }
7677
7678 auto NoCallIntrinsic = [this](Instruction *I) {
7679 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
7680 if (II->isAssumeLikeIntrinsic())
7681 return true;
7682 FastMathFlags FMF;
7683 SmallVector<Type *, 4> Tys;
7684 for (auto &ArgOp : II->args())
7685 Tys.push_back(ArgOp->getType());
7686 if (auto *FPMO = dyn_cast<FPMathOperator>(II))
7687 FMF = FPMO->getFastMathFlags();
7688 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys,
7689 FMF);
7690 InstructionCost IntrCost =
7691 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput);
7692 InstructionCost CallCost = TTI->getCallInstrCost(
7693 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput);
7694 if (IntrCost < CallCost)
7695 return true;
7696 }
7697 return false;
7698 };
7699
7700 // Debug information does not impact spill cost.
7701 if (isa<CallInst>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
7702 &*PrevInstIt != PrevInst)
7703 NumCalls++;
7704
7705 ++PrevInstIt;
7706 }
7707
7708 if (NumCalls) {
7709 SmallVector<Type*, 4> V;
7710 for (auto *II : LiveValues) {
7711 auto *ScalarTy = II->getType();
7712 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
7713 ScalarTy = VectorTy->getElementType();
7714 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
7715 }
7716 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
7717 }
7718
7719 PrevInst = Inst;
7720 }
7721
7722 return Cost;
7723}
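// Worked sketch of the accumulation above (hypothetical target numbers): with
// BundleWidth = 4, two live i32 values across 3 intervening calls, and
// getCostOfKeepingLiveOverCall returning 2 per call for the two <4 x i32>
// types, the spill-cost contribution of that gap is 3 * 2 = 6.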
7724
7725/// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
7726/// the buildvector sequence.
7727static bool isFirstInsertElement(const InsertElementInst *IE1,
7728 const InsertElementInst *IE2) {
7729 if (IE1 == IE2)
7730 return false;
7731 const auto *I1 = IE1;
7732 const auto *I2 = IE2;
7733 const InsertElementInst *PrevI1;
7734 const InsertElementInst *PrevI2;
7735 unsigned Idx1 = *getInsertIndex(IE1);
7736 unsigned Idx2 = *getInsertIndex(IE2);
7737 do {
7738 if (I2 == IE1)
7739 return true;
7740 if (I1 == IE2)
7741 return false;
7742 PrevI1 = I1;
7743 PrevI2 = I2;
7744 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
7745 getInsertIndex(I1).value_or(Idx2) != Idx2)
7746 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
7747 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
7748 getInsertIndex(I2).value_or(Idx1) != Idx1)
7749 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
7750 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
7751 llvm_unreachable("Two different buildvectors not expected.")::llvm::llvm_unreachable_internal("Two different buildvectors not expected."
, "llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp", 7751)
;
7752}
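// Hypothetical buildvector chain illustrating the walk above: following
// operand 0 from IE2 reaches IE1, so isFirstInsertElement(IE1, IE2) returns
// true.
//   %v0 = insertelement <4 x i32> poison, i32 %a, i32 0   ; IE1
//   %v1 = insertelement <4 x i32> %v0,    i32 %b, i32 1
//   %v2 = insertelement <4 x i32> %v1,    i32 %c, i32 2   ; IE2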
7753
7754namespace {
7755/// Returns the incoming Value * if the requested type is Value * too, or a
7756/// default value otherwise.
7757struct ValueSelect {
7758 template <typename U>
7759 static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
7760 return V;
7761 }
7762 template <typename U>
7763 static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
7764 return U();
7765 }
7766};
7767} // namespace
7768
7769/// Does the analysis of the provided shuffle masks and performs the requested
7770/// actions on the vectors with the given shuffle masks. It tries to do it in
7771/// several steps.
7772/// 1. If the Base vector is not an undef vector, resize the very first mask to
7773/// have a common VF and perform the action for 2 input vectors (including the
7774/// non-undef Base). Other shuffle masks are combined with the result of the
7775/// first stage and processed as a shuffle of 2 elements.
7776/// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
7777/// the action only for 1 vector with the given mask, if it is not the identity
7778/// mask.
7779/// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
7780/// vectors, combining the masks properly between the steps.
7781template <typename T>
7782static T *performExtractsShuffleAction(
7783 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
7784 function_ref<unsigned(T *)> GetVF,
7785 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction,
7786 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
7787   assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
7788 SmallVector<int> Mask(ShuffleMask.begin()->second);
7789 auto VMIt = std::next(ShuffleMask.begin());
7790 T *Prev = nullptr;
7791 SmallBitVector UseMask =
7792 buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask);
7793 SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask);
7794 if (!IsBaseUndef.all()) {
7795 // Base is not undef, need to combine it with the next subvectors.
7796 std::pair<T *, bool> Res =
7797 ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false);
7798 SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask);
7799 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
7800 if (Mask[Idx] == UndefMaskElem)
7801 Mask[Idx] = IsBasePoison.test(Idx) ? UndefMaskElem : Idx;
7802 else
7803 Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
7804 }
7805 auto *V = ValueSelect::get<T *>(Base);
7806 (void)V;
7807 assert((!V || GetVF(V) == Mask.size()) &&
7808 "Expected base vector of VF number of elements.");
7809 Prev = Action(Mask, {nullptr, Res.first});
7810 } else if (ShuffleMask.size() == 1) {
7811 // Base is undef and only 1 vector is shuffled - perform the action only
7812 // for a single vector, if the mask is not the identity mask.
7813 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask,
7814 /*ForSingleMask=*/true);
7815 if (Res.second)
7816 // Identity mask is found.
7817 Prev = Res.first;
7818 else
7819 Prev = Action(Mask, {ShuffleMask.begin()->first});
7820 } else {
7821 // Base is undef and at least 2 input vectors are shuffled - perform
7822 // 2-vector shuffles step by step, combining the shuffles between the steps.
7823 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
7824 unsigned Vec2VF = GetVF(VMIt->first);
7825 if (Vec1VF == Vec2VF) {
7826 // No need to resize the input vectors since they are of the same size; we
7827 // can shuffle them directly.
7828 ArrayRef<int> SecMask = VMIt->second;
7829 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
7830 if (SecMask[I] != UndefMaskElem) {
7831 assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars.");
7832 Mask[I] = SecMask[I] + Vec1VF;
7833 }
7834 }
7835 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first});
7836 } else {
7837 // Vectors of different sizes - resize and reshuffle.
7838 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask,
7839 /*ForSingleMask=*/false);
7840 std::pair<T *, bool> Res2 =
7841 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
7842 ArrayRef<int> SecMask = VMIt->second;
7843 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
7844 if (Mask[I] != UndefMaskElem) {
7845 assert(SecMask[I] == UndefMaskElem && "Multiple uses of scalars.");
7846 if (Res1.second)
7847 Mask[I] = I;
7848 } else if (SecMask[I] != UndefMaskElem) {
7849 assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars.");
7850 Mask[I] = (Res2.second ? I : SecMask[I]) + VF;
7851 }
7852 }
7853 Prev = Action(Mask, {Res1.first, Res2.first});
7854 }
7855 VMIt = std::next(VMIt);
7856 }
7857 bool IsBaseNotUndef = !IsBaseUndef.all();
7858 (void)IsBaseNotUndef;
7859 // Perform requested actions for the remaining masks/vectors.
7860 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) {
7861 // Shuffle other input vectors, if any.
7862 std::pair<T *, bool> Res =
7863 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
7864 ArrayRef<int> SecMask = VMIt->second;
7865 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
7866 if (SecMask[I] != UndefMaskElem) {
7867 assert((Mask[I] == UndefMaskElem || IsBaseNotUndef) &&
7868 "Multiple uses of scalars.");
7869 Mask[I] = (Res.second ? I : SecMask[I]) + VF;
7870 } else if (Mask[I] != UndefMaskElem) {
7871 Mask[I] = I;
7872 }
7873 }
7874 Prev = Action(Mask, {Prev, Res.first});
7875 }
7876 return Prev;
7877}
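// Worked example for the same-VF case above (illustrative values only,
// UndefMaskElem shown as -1): with
//   Mask    = {0, -1, 1, -1}  // lanes taken from the first vector
//   SecMask = {-1, 2, -1, 3}  // lanes taken from the second vector
// and Vec1VF == 4, the combining loop rewrites each defined SecMask lane as
// SecMask[I] + Vec1VF, producing the two-source mask
//   Mask = {0, 6, 1, 7}
// which Action() can then materialize as a single two-input shuffle.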
7878
7879InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
7880 InstructionCost Cost = 0;
7881 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
7882 << VectorizableTree.size() << ".\n");
7883
7884 unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
7885
7886 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
7887 TreeEntry &TE = *VectorizableTree[I];
7888 if (TE.State == TreeEntry::NeedToGather) {
7889 if (const TreeEntry *E = getTreeEntry(TE.getMainOp());
7890 E && E->getVectorFactor() == TE.getVectorFactor() &&
7891 E->isSame(TE.Scalars)) {
7892 // Some gather nodes might be exactly the same as some vectorizable
7893 // nodes after reordering; handle that case here.
7894 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle that starts with "
7895 << *TE.Scalars[0] << ".\n"
7896 << "SLP: Current total cost = " << Cost << "\n");
7897 continue;
7898 }
7899 }
7900
7901 InstructionCost C = getEntryCost(&TE, VectorizedVals);
7902 Cost += C;
7903 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
7904 << " for bundle that starts with " << *TE.Scalars[0]
7905 << ".\n"
7906 << "SLP: Current total cost = " << Cost << "\n");
7907 }
7908
7909 SmallPtrSet<Value *, 16> ExtractCostCalculated;
7910 InstructionCost ExtractCost = 0;
7911 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks;
7912 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers;
7913 SmallVector<APInt> DemandedElts;
7914 for (ExternalUser &EU : ExternalUses) {
7915 // We only add extract cost once for the same scalar.
7916 if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
7917 !ExtractCostCalculated.insert(EU.Scalar).second)
7918 continue;
7919
7920 // Uses by ephemeral values are free (because the ephemeral value will be
7921 // removed prior to code generation, and so the extraction will be
7922 // removed as well).
7923 if (EphValues.count(EU.User))
7924 continue;
7925
7926 // No extract cost for a vector "scalar".
7927 if (isa<FixedVectorType>(EU.Scalar->getType()))
7928 continue;
7929
7930 // If the found user is an insertelement, do not calculate the extract cost
7931 // but try to detect it as a final shuffled/identity match.
7932 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) {
7933 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
7934 std::optional<unsigned> InsertIdx = getInsertIndex(VU);
7935 if (InsertIdx) {
7936 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
7937 auto *It = find_if(
7938 FirstUsers,
7939 [this, VU](const std::pair<Value *, const TreeEntry *> &Pair) {
7940 return areTwoInsertFromSameBuildVector(
7941 VU, cast<InsertElementInst>(Pair.first),
7942 [this](InsertElementInst *II) -> Value * {
7943 Value *Op0 = II->getOperand(0);
7944 if (getTreeEntry(II) && !getTreeEntry(Op0))
7945 return nullptr;
7946 return Op0;
7947 });
7948 });
7949 int VecId = -1;
7950 if (It == FirstUsers.end()) {
7951 (void)ShuffleMasks.emplace_back();
7952 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE];
7953 if (Mask.empty())
7954 Mask.assign(FTy->getNumElements(), UndefMaskElem);
7955 // Find the insertvector vectorized in the tree, if any.
7956 Value *Base = VU;
7957 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
7958 if (IEBase != EU.User &&
7959 (!IEBase->hasOneUse() ||
7960 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx))
7961 break;
7962 // Build the mask for the vectorized insertelement instructions.
7963 if (const TreeEntry *E = getTreeEntry(IEBase)) {
7964 VU = IEBase;
7965 do {
7966 IEBase = cast<InsertElementInst>(Base);
7967 int Idx = *getInsertIndex(IEBase);
7968 assert(Mask[Idx] == UndefMaskElem &&
7969 "InsertElementInstruction used already.");
7970 Mask[Idx] = Idx;
7971 Base = IEBase->getOperand(0);
7972 } while (E == getTreeEntry(Base));
7973 break;
7974 }
7975 Base = cast<InsertElementInst>(Base)->getOperand(0);
7976 }
7977 FirstUsers.emplace_back(VU, ScalarTE);
7978 DemandedElts.push_back(APInt::getZero(FTy->getNumElements()));
7979 VecId = FirstUsers.size() - 1;
7980 } else {
7981 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first)))
7982 It->first = VU;
7983 VecId = std::distance(FirstUsers.begin(), It);
7984 }
7985 int InIdx = *InsertIdx;
7986 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE];
7987 if (Mask.empty())
7988 Mask.assign(FTy->getNumElements(), UndefMaskElem);
7989 Mask[InIdx] = EU.Lane;
7990 DemandedElts[VecId].setBit(InIdx);
7991 continue;
7992 }
7993 }
7994 }
7995
7996 // If we plan to rewrite the tree in a smaller type, we will need to sign
7997 // extend the extracted value back to the original type. Here, we account
7998 // for the extract and the added cost of the sign extend if needed.
7999 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
8000 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
8001 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
8002 if (MinBWs.count(ScalarRoot)) {
8003 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
8004 auto Extend =
8005 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
8006 VecTy = FixedVectorType::get(MinTy, BundleWidth);
8007 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
8008 VecTy, EU.Lane);
8009 } else {
8010 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
8011 CostKind, EU.Lane);
8012 }
8013 }
8014
8015 InstructionCost SpillCost = getSpillCost();
8016 Cost += SpillCost + ExtractCost;
8017 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask,
8018 bool) {
8019 InstructionCost C = 0;
8020 unsigned VF = Mask.size();
8021 unsigned VecVF = TE->getVectorFactor();
8022 if (VF != VecVF &&
8023 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) ||
8024 (all_of(Mask,
8025 [VF](int Idx) { return Idx < 2 * static_cast<int>(VF); }) &&
8026 !ShuffleVectorInst::isIdentityMask(Mask)))) {
8027 SmallVector<int> OrigMask(VecVF, UndefMaskElem);
8028 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)),
8029 OrigMask.begin());
8030 C = TTI->getShuffleCost(
8031 TTI::SK_PermuteSingleSrc,
8032 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask);
8033 LLVM_DEBUG(
8034 dbgs() << "SLP: Adding cost " << C
8035 << " for final shuffle of insertelement external users.\n";
8036 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
8037 Cost += C;
8038 return std::make_pair(TE, true);
8039 }
8040 return std::make_pair(TE, false);
8041 };
8042 // Calculate the cost of the reshuffled vectors, if any.
8043 for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
8044 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0);
8045 unsigned VF = ShuffleMasks[I].begin()->second.size();
8046 auto *FTy = FixedVectorType::get(
8047 cast<VectorType>(FirstUsers[I].first->getType())->getElementType(), VF);
8048 auto Vector = ShuffleMasks[I].takeVector();
8049 auto &&EstimateShufflesCost = [this, FTy,
8050 &Cost](ArrayRef<int> Mask,
8051 ArrayRef<const TreeEntry *> TEs) {
8052 assert((TEs.size() == 1 || TEs.size() == 2) &&
8053 "Expected exactly 1 or 2 tree entries.");
8054 if (TEs.size() == 1) {
8055 int Limit = 2 * Mask.size();
8056 if (!all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) ||
8057 !ShuffleVectorInst::isIdentityMask(Mask)) {
8058 InstructionCost C =
8059 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask);
8060 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
8061 << " for final shuffle of insertelement "
8062 "external users.\n";
8063 TEs.front()->dump();
8064 dbgs() << "SLP: Current total cost = " << Cost << "\n");
8065 Cost += C;
8066 }
8067 } else {
8068 InstructionCost C =
8069 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask);
8070 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
8071 << " for final shuffle of vector node and external "
8072 "insertelement users.\n";
8073 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump();
8074 dbgs() << "SLP: Current total cost = " << Cost << "\n");
8075 Cost += C;
8076 }
8077 return TEs.back();
8078 };
8079 (void)performExtractsShuffleAction<const TreeEntry>(
8080 MutableArrayRef(Vector.data(), Vector.size()), Base,
8081 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF,
8082 EstimateShufflesCost);
8083 InstructionCost InsertCost = TTI->getScalarizationOverhead(
8084 cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I],
8085 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput);
8086 Cost -= InsertCost;
8087 }
8088
8089#ifndef NDEBUG
8090 SmallString<256> Str;
8091 {
8092 raw_svector_ostream OS(Str);
8093 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
8094 << "SLP: Extract Cost = " << ExtractCost << ".\n"
8095 << "SLP: Total Cost = " << Cost << ".\n";
8096 }
8097 LLVM_DEBUG(dbgs() << Str);
8098 if (ViewSLPTree)
8099 ViewGraph(this, "SLP" + F->getName(), false, Str);
8100#endif
8101
8102 return Cost;
8103}
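// Informal summary of the cost model above (not a formula from the source):
//   TotalCost ~= sum(getEntryCost(TE)) + SpillCost + ExtractCost
//                + final shuffle costs for insertelement users
//                - InsertCost saved on the demanded insertelement lanes
// The more negative the total, the more profitable vectorization of this
// tree is expected to be.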
8104
8105std::optional<TargetTransformInfo::ShuffleKind>
8106BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, ArrayRef<Value *> VL,
8107 SmallVectorImpl<int> &Mask,
8108 SmallVectorImpl<const TreeEntry *> &Entries) {
8109 Entries.clear();
8110 // No need to check for the topmost gather node.
8111 if (TE == VectorizableTree.front().get())
8112 return std::nullopt;
8113 Mask.assign(VL.size(), UndefMaskElem);
8114 assert(TE->UserTreeIndices.size() == 1 &&
8115 "Expected only single user of the gather node.");
8116 // TODO: currently checking only for Scalars in the tree entry, need to count
8117 // reused elements too for better cost estimation.
8118 Instruction &UserInst =
8119 getLastInstructionInBundle(TE->UserTreeIndices.front().UserTE);
8120 auto *PHI = dyn_cast<PHINode>(&UserInst);
8121 auto *NodeUI = DT->getNode(
8122 PHI ? PHI->getIncomingBlock(TE->UserTreeIndices.front().EdgeIdx)
8123 : UserInst.getParent());
8124 assert(NodeUI && "Should only process reachable instructions");
8125 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end());
8126 auto CheckOrdering = [&](Instruction *LastEI) {
8127 // Check if the user node of the TE comes after the user node of EntryPtr;
8128 // otherwise EntryPtr depends on TE.
8129 // Gather nodes usually are not scheduled and inserted before their first
8130 // user node. So, instead of checking dependency between the gather nodes
8131 // themselves, we check the dependency between their user nodes.
8132 // If one user node comes before the second one, we cannot use the second
8133 // gather node as the source vector for the first gather node, because in
8134 // the list of instructions it will be emitted later.
8135 auto *EntryParent = LastEI->getParent();
8136 auto *NodeEUI = DT->getNode(EntryParent);
8137 if (!NodeEUI)
8138 return false;
8139 assert((NodeUI == NodeEUI) ==
8140 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) &&
8141 "Different nodes should have different DFS numbers");
8142 // Check the order of the gather nodes' users.
8143 if (UserInst.getParent() != EntryParent &&
8144 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI)))
8145 return false;
8146 if (UserInst.getParent() == EntryParent && UserInst.comesBefore(LastEI))
8147 return false;
8148 return true;
8149 };
8150 // Build a map from values to the tree entries that use them.
8151 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
8152 for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
8153 if (EntryPtr.get() == TE)
8154 continue;
8155 if (EntryPtr->State != TreeEntry::NeedToGather)
8156 continue;
8157 if (!any_of(EntryPtr->Scalars, [&GatheredScalars](Value *V) {
8158 return GatheredScalars.contains(V);
8159 }))
8160 continue;
8161 assert(EntryPtr->UserTreeIndices.size() == 1 &&
8162 "Expected only single user of the gather node.");
8163 Instruction &EntryUserInst =
8164 getLastInstructionInBundle(EntryPtr->UserTreeIndices.front().UserTE);
8165 if (&UserInst == &EntryUserInst) {
8166 // If 2 gathers are operands of the same entry, compare operand indices
8167 // and use the earlier one as the base.
8168 if (TE->UserTreeIndices.front().UserTE ==
8169 EntryPtr->UserTreeIndices.front().UserTE &&
8170 TE->UserTreeIndices.front().EdgeIdx <
8171 EntryPtr->UserTreeIndices.front().EdgeIdx)
8172 continue;
8173 }
8174 // Check if the user node of the TE comes after the user node of EntryPtr;
8175 // otherwise EntryPtr depends on TE.
8176 auto *EntryPHI = dyn_cast<PHINode>(&EntryUserInst);
8177 auto *EntryI =
8178 EntryPHI
8179 ? EntryPHI
8180 ->getIncomingBlock(EntryPtr->UserTreeIndices.front().EdgeIdx)
8181 ->getTerminator()
8182 : &EntryUserInst;
8183 if (!CheckOrdering(EntryI))
8184 continue;
8185 for (Value *V : EntryPtr->Scalars)
8186 if (!isConstant(V))
8187 ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
8188 }
8189 // Find all tree entries used by the gathered values. If no common entries
8190 // are found, this is not a shuffle.
8191 // Here we build a set of tree nodes for each gathered value and try to
8192 // find the intersection between these sets. If we have at least one common
8193 // tree node for each gathered value, we have just a permutation of a
8194 // single vector. If we have 2 different sets, we are in a situation where
8195 // we have a permutation of 2 input vectors.
8196 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
8197 DenseMap<Value *, int> UsedValuesEntry;
8198 for (Value *V : TE->Scalars) {
8199 if (isConstant(V))
8200 continue;
8201 // Build a list of tree entries where V is used.
8202 SmallPtrSet<const TreeEntry *, 4> VToTEs;
8203 auto It = ValueToTEs.find(V);
8204 if (It != ValueToTEs.end())
8205 VToTEs = It->second;
8206 if (const TreeEntry *VTE = getTreeEntry(V))
8207 VToTEs.insert(VTE);
8208 if (VToTEs.empty())
8209 continue;
8210 if (UsedTEs.empty()) {
8211 // On the first iteration, just insert the list of nodes into the vector.
8212 UsedTEs.push_back(VToTEs);
8213 UsedValuesEntry.try_emplace(V, 0);
8214 } else {
8215 // Need to check if there are any previously used tree nodes which use V.
8216 // If there are no such nodes, consider that we have another input
8217 // vector.
8218 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
8219 unsigned Idx = 0;
8220 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
8221 // Do we have a non-empty intersection of previously listed tree entries
8222 // and tree entries using current V?
8223 set_intersect(VToTEs, Set);
8224 if (!VToTEs.empty()) {
8225 // Yes, write the new subset and continue analysis for the next
8226 // scalar.
8227 Set.swap(VToTEs);
8228 break;
8229 }
8230 VToTEs = SavedVToTEs;
8231 ++Idx;
8232 }
8233 // No non-empty intersection found - need to add a second set of possible
8234 // source vectors.
8235 if (Idx == UsedTEs.size()) {
8236 // If the number of input vectors is greater than 2 - not a permutation,
8237 // fall back to the regular gather.
8238 // TODO: support multiple reshuffled nodes.
8239 if (UsedTEs.size() == 2)
8240 continue;
8241 UsedTEs.push_back(SavedVToTEs);
8242 Idx = UsedTEs.size() - 1;
8243 }
8244 UsedValuesEntry.try_emplace(V, Idx);
8245 }
8246 }
8247
8248 if (UsedTEs.empty())
8249 return std::nullopt;
8250
8251 unsigned VF = 0;
8252 if (UsedTEs.size() == 1) {
8253 // Keep the order to avoid non-determinism.
8254 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(),
8255 UsedTEs.front().end());
8256 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
8257 return TE1->Idx < TE2->Idx;
8258 });
8259 // First, try to find a perfect match in another gather node.
8260 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) {
8261 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars);
8262 });
8263 if (It != FirstEntries.end()) {
8264 Entries.push_back(*It);
8265 std::iota(Mask.begin(), Mask.end(), 0);
8266 // Clear undef scalars.
8267 for (int I = 0, Sz = VL.size(); I < Sz; ++I)
8268 if (isa<PoisonValue>(TE->Scalars[I]))
8269 Mask[I] = UndefMaskElem;
8270 return TargetTransformInfo::SK_PermuteSingleSrc;
8271 }
8272 // No perfect match, just shuffle, so choose the first tree node from the
8273 // tree.
8274 Entries.push_back(FirstEntries.front());
8275 } else {
8276 // Try to find nodes with the same vector factor.
8277 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
8278 // Keep the order of tree nodes to avoid non-determinism.
8279 DenseMap<int, const TreeEntry *> VFToTE;
8280 for (const TreeEntry *TE : UsedTEs.front()) {
8281 unsigned VF = TE->getVectorFactor();
8282 auto It = VFToTE.find(VF);
8283 if (It != VFToTE.end()) {
8284 if (It->second->Idx > TE->Idx)
8285 It->getSecond() = TE;
8286 continue;
8287 }
8288 VFToTE.try_emplace(VF, TE);
8289 }
8290 // Same, keep the order to avoid non-determinism.
8291 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(),
8292 UsedTEs.back().end());
8293 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
8294 return TE1->Idx < TE2->Idx;
8295 });
8296 for (const TreeEntry *TE : SecondEntries) {
8297 auto It = VFToTE.find(TE->getVectorFactor());
8298 if (It != VFToTE.end()) {
8299 VF = It->first;
8300 Entries.push_back(It->second);
8301 Entries.push_back(TE);
8302 break;
8303 }
8304 }
8305 // No 2 source vectors with the same vector factor - give up and do a
8306 // regular gather.
8307 if (Entries.empty())
8308 return std::nullopt;
8309 }
8310
8311 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof);
8312 // Checks if the 2 PHIs are compatible, i.e. whether they are likely to be
8313 // vectorized together.
8314 auto AreCompatiblePHIs = [&](Value *V, Value *V1) {
8315 auto *PHI = cast<PHINode>(V);
8316 auto *PHI1 = cast<PHINode>(V1);
8317 // Check that all incoming values are compatible/from same parent (if they
8318 // are instructions).
8319 // The incoming values are compatible if they all are constants, or
8320 // instruction with the same/alternate opcodes from the same basic block.
8321 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
8322 Value *In = PHI->getIncomingValue(I);
8323 Value *In1 = PHI1->getIncomingValue(I);
8324 if (isConstant(In) && isConstant(In1))
8325 continue;
8326 if (!getSameOpcode({In, In1}, *TLI).getOpcode())
8327 return false;
8328 if (cast<Instruction>(In)->getParent() !=
8329 cast<Instruction>(In1)->getParent())
8330 return false;
8331 }
8332 return true;
8333 };
8334 // Check if the value can be ignored during analysis for shuffled gathers.
8335 // We suppose it is better to ignore instructions which do not form splats,
8336 // are not vectorized/not extractelements (these instructions will be handled
8337 // by extractelements processing) or may form a vector node in the future.
8338 auto MightBeIgnored = [=](Value *V) {
8339 auto *I = dyn_cast<Instruction>(V);
8340 SmallVector<Value *> IgnoredVals;
8341 if (UserIgnoreList)
8342 IgnoredVals.assign(UserIgnoreList->begin(), UserIgnoreList->end());
8343 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) &&
8344 !isVectorLikeInstWithConstOps(I) &&
8345 !areAllUsersVectorized(I, IgnoredVals) && isSimple(I);
8346 };
8347 // Check that the neighbor instruction may form a full vector node with the
8348 // current instruction V. It is possible if they have the same/alternate
8349 // opcode and the same parent basic block.
8350 auto NeighborMightBeIgnored = [&](Value *V, int Idx) {
8351 Value *V1 = VL[Idx];
8352 bool UsedInSameVTE = false;
8353 auto It = UsedValuesEntry.find(V1);
8354 if (It != UsedValuesEntry.end())
8355 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second;
8356 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE &&
8357 getSameOpcode({V, V1}, *TLI).getOpcode() &&
8358 cast<Instruction>(V)->getParent() ==
8359 cast<Instruction>(V1)->getParent() &&
8360 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1));
8361 };
8362 // Build a shuffle mask for better cost estimation and vector emission.
8363 SmallBitVector UsedIdxs(Entries.size());
8364 SmallVector<std::pair<unsigned, int>> EntryLanes;
8365 for (int I = 0, E = VL.size(); I < E; ++I) {
8366 Value *V = VL[I];
8367 auto It = UsedValuesEntry.find(V);
8368 if (It == UsedValuesEntry.end())
8369 continue;
8370 // Do not try to shuffle scalars if they are constants, or instructions
8371 // that can be vectorized as part of a subsequent buildvector
8372 // vectorization.
8373 if (isConstant(V) || (MightBeIgnored(V) &&
8374 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) ||
8375 (I != E - 1 && NeighborMightBeIgnored(V, I + 1)))))
8376 continue;
8377 unsigned Idx = It->second;
8378 EntryLanes.emplace_back(Idx, I);
8379 UsedIdxs.set(Idx);
8380 }
8381 // Iterate through all shuffled scalars and select entries which can be
8382 // used for the final shuffle.
8383 SmallVector<const TreeEntry *> TempEntries;
8384 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) {
8385 if (!UsedIdxs.test(I))
8386 continue;
8387 // Fix the entry number for the given scalar. If it is the first entry, set
8388 // Pair.first to 0, otherwise to 1 (we currently select at most 2 nodes).
8389 // These indices are used as the vector offset when calculating the final
8390 // shuffle mask.
8391 for (std::pair<unsigned, int> &Pair : EntryLanes)
8392 if (Pair.first == I)
8393 Pair.first = TempEntries.size();
8394 TempEntries.push_back(Entries[I]);
8395 }
8396 Entries.swap(TempEntries);
8397 if (EntryLanes.size() == Entries.size() && !VL.equals(TE->Scalars)) {
8398 // We may have here 1 or 2 entries only. If the number of scalars is equal
8399 // to the number of entries, there is no need to do the analysis; it is not
8400 // very profitable. Since VL is not the same as TE->Scalars, we already
8401 // have some shuffles before this point. Cut off the unprofitable case.
8402 Entries.clear();
8403 return std::nullopt;
8404 }
8405 // Build the final mask, check for the identity shuffle, if possible.
8406 bool IsIdentity = Entries.size() == 1;
8407 // Pair.first is the offset to the vector, while Pair.second is the index of
8408 // the scalar in the list.
8409 for (const std::pair<unsigned, int> &Pair : EntryLanes) {
8410 Mask[Pair.second] = Pair.first * VF +
8411 Entries[Pair.first]->findLaneForValue(VL[Pair.second]);
8412 IsIdentity &= Mask[Pair.second] == Pair.second;
8413 }
8414 switch (Entries.size()) {
8415 case 1:
8416 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2)
8417 return TargetTransformInfo::SK_PermuteSingleSrc;
8418 break;
8419 case 2:
8420 if (EntryLanes.size() > 2 || VL.size() <= 2)
8421 return TargetTransformInfo::SK_PermuteTwoSrc;
8422 break;
8423 default:
8424 break;
8425 }
8426 Entries.clear();
8427 return std::nullopt;
8428}
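// Illustrative example (values invented): for gathered scalars
// VL = {a, b, c, d} that all appear in one other tree entry E0 with scalars
// {c, d, a, b}, the function records Entries = {E0}, builds
// Mask = {2, 3, 0, 1}, and returns SK_PermuteSingleSrc. If the scalars were
// split across two entries with equal vector factor VF, the lanes of the
// second entry would be offset by VF in the mask and SK_PermuteTwoSrc
// returned instead.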
8429
8430InstructionCost BoUpSLP::getGatherCost(FixedVectorType *Ty,
8431 const APInt &ShuffledIndices,
8432 bool NeedToShuffle) const {
8433 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
8434 InstructionCost Cost =
8435 TTI->getScalarizationOverhead(Ty, ~ShuffledIndices, /*Insert*/ true,
8436 /*Extract*/ false, CostKind);
8437 if (NeedToShuffle)
8438 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
8439 return Cost;
8440}
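// Numeric sketch (illustrative): for a <4 x i32> gather where lanes 1 and 3
// are covered by a shuffle (ShuffledIndices = 0b1010, NeedToShuffle = true),
// ~ShuffledIndices selects lanes 0 and 2, so the cost is the scalarization
// overhead of inserting those two lanes plus one SK_PermuteSingleSrc shuffle
// for the remaining elements.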
8441
8442InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
8443 // Find the type of the operands in VL.
8444 Type *ScalarTy = VL[0]->getType();
8445 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
8446 ScalarTy = SI->getValueOperand()->getType();
8447 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
8448 bool DuplicateNonConst = false;
8449 // Find the cost of inserting/extracting values from the vector.
8450 // Check if the same elements are inserted several times and count them as
8451 // shuffle candidates.
8452 APInt ShuffledElements = APInt::getZero(VL.size());
8453 DenseSet<Value *> UniqueElements;
8454 // Iterate in reverse order so that, among duplicates, the higher-lane (higher-cost) insert is the one counted.
8455 for (unsigned I = VL.size(); I > 0; --I) {
8456 unsigned Idx = I - 1;
8457 // No need to shuffle duplicates for constants.
8458 if (isConstant(VL[Idx])) {
8459 ShuffledElements.setBit(Idx);
8460 continue;
8461 }
8462 if (!UniqueElements.insert(VL[Idx]).second) {
8463 DuplicateNonConst = true;
8464 ShuffledElements.setBit(Idx);
8465 }
8466 }
8467 return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst);
8468}
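// Walk-through (illustrative): for VL = {x, y, x, 7} the reverse scan marks
// lane 3 (constant) and then, after recording the lane-2 x as unique, marks
// lane 0 as a duplicate and sets DuplicateNonConst. The final cost therefore
// covers inserting lanes 1 and 2 plus one shuffle for the repeated value.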
8469
8470// Perform operand reordering on the instructions in VL and return the reordered
8471// operands in Left and Right.
8472void BoUpSLP::reorderInputsAccordingToOpcode(
8473 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
8474 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
8475 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) {
8476 if (VL.empty())
8477 return;
8478 VLOperands Ops(VL, TLI, DL, SE, R);
8479 // Reorder the operands in place.
8480 Ops.reorder();
8481 Left = Ops.getVL(0);
8482 Right = Ops.getVL(1);
8483}
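// Illustrative sketch: for VL = {add(a, L0), add(L1, b)} with consecutive
// loads L0 and L1, VLOperands::reorder() may swap the commutative operands
// of the second add so that Left = {a, b} and Right = {L0, L1}, letting the
// load lane form a vectorizable bundle.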
8484
8485Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) {
8486 // Get the basic block this bundle is in. All instructions in the bundle
8487 // should be in this block (except for extractelement-like instructions with
8488 // constant indices).
8489 auto *Front = E->getMainOp();
8490 auto *BB = Front->getParent();
8491 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
8492 if (E->getOpcode() == Instruction::GetElementPtr &&
8493 !isa<GetElementPtrInst>(V))
8494 return true;
8495 auto *I = cast<Instruction>(V);
8496 return !E->isOpcodeOrAlt(I) || I->getParent() == BB ||
8497 isVectorLikeInstWithConstOps(I);
8498 }));
8499
8500 auto &&FindLastInst = [E, Front, this, &BB]() {
8501 Instruction *LastInst = Front;
8502 for (Value *V : E->Scalars) {
8503 auto *I = dyn_cast<Instruction>(V);
8504 if (!I)
8505 continue;
8506 if (LastInst->getParent() == I->getParent()) {
8507 if (LastInst->comesBefore(I))
8508 LastInst = I;
8509 continue;
8510 }
8511 assert(isVectorLikeInstWithConstOps(LastInst) &&
8512 isVectorLikeInstWithConstOps(I) &&
8513 "Expected vector-like insts only.");
8514 if (!DT->isReachableFromEntry(LastInst->getParent())) {
8515 LastInst = I;
8516 continue;
8517 }
8518 if (!DT->isReachableFromEntry(I->getParent()))
8519 continue;
8520 auto *NodeA = DT->getNode(LastInst->getParent());
8521 auto *NodeB = DT->getNode(I->getParent());
8522 assert(NodeA && "Should only process reachable instructions");
8523 assert(NodeB && "Should only process reachable instructions");
8524 assert((NodeA == NodeB) ==
8525 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
8526 "Different nodes should have different DFS numbers");
8527 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn())
8528 LastInst = I;
8529 }
8530 BB = LastInst->getParent();
8531 return LastInst;
8532 };
8533
8534 auto &&FindFirstInst = [E, Front, this]() {
8535 Instruction *FirstInst = Front;
8536 for (Value *V : E->Scalars) {
8537 auto *I = dyn_cast<Instruction>(V);
8538 if (!I)
8539 continue;
8540 if (FirstInst->getParent() == I->getParent()) {
8541 if (I->comesBefore(FirstInst))
8542 FirstInst = I;
8543 continue;
8544 }
8545 assert(isVectorLikeInstWithConstOps(FirstInst) &&
8546 isVectorLikeInstWithConstOps(I) &&
8547 "Expected vector-like insts only.");
8548 if (!DT->isReachableFromEntry(FirstInst->getParent())) {
8549 FirstInst = I;
8550 continue;
8551 }
8552 if (!DT->isReachableFromEntry(I->getParent()))
8553 continue;
8554 auto *NodeA = DT->getNode(FirstInst->getParent());
8555 auto *NodeB = DT->getNode(I->getParent());
8556 assert(NodeA && "Should only process reachable instructions");
8557 assert(NodeB && "Should only process reachable instructions");
8558 assert((NodeA == NodeB) ==
8559 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
8560 "Different nodes should have different DFS numbers");
8561 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn())
8562 FirstInst = I;
8563 }
8564 return FirstInst;
8565 };
8566
8567 // Set the insert point to the beginning of the basic block if the entry
8568 // should not be scheduled.
8569 if (E->State != TreeEntry::NeedToGather &&
8570 (doesNotNeedToSchedule(E->Scalars) ||
8571 all_of(E->Scalars, isVectorLikeInstWithConstOps))) {
8572 Instruction *InsertInst;
8573 if (all_of(E->Scalars, [](Value *V) {
8574 return !isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V);
8575 }))
8576 InsertInst = FindLastInst();
8577 else
8578 InsertInst = FindFirstInst();
8579 return *InsertInst;
8580 }
8581
8582 // The last instruction in the bundle in program order.
8583 Instruction *LastInst = nullptr;
8584
8585 // Find the last instruction. The common case should be that BB has been
8586 // scheduled, and the last instruction is VL.back(). So we start with
8587 // VL.back() and iterate over schedule data until we reach the end of the
8588 // bundle. The end of the bundle is marked by null ScheduleData.
8589 if (BlocksSchedules.count(BB)) {
8590 Value *V = E->isOneOf(E->Scalars.back());
8591 if (doesNotNeedToBeScheduled(V))
8592 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled);
8593 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V);
8594 if (Bundle && Bundle->isPartOfBundle())
8595 for (; Bundle; Bundle = Bundle->NextInBundle)
8596 if (Bundle->OpValue == Bundle->Inst)
8597 LastInst = Bundle->Inst;
8598 }
8599
8600 // LastInst can still be null at this point if there's either not an entry
8601 // for BB in BlocksSchedules or there's no ScheduleData available for
8602 // VL.back(). This can be the case if buildTree_rec aborts for various
8603 // reasons (e.g., the maximum recursion depth is reached, the maximum region
8604 // size is reached, etc.). ScheduleData is initialized in the scheduling
8605 // "dry-run".
8606 //
8607 // If this happens, we can still find the last instruction by brute force. We
8608 // iterate forwards from Front (inclusive) until we either see all
8609 // instructions in the bundle or reach the end of the block. If Front is the
8610 // last instruction in program order, LastInst will be set to Front, and we
8611 // will visit all the remaining instructions in the block.
8612 //
8613 // One of the reasons we exit early from buildTree_rec is to place an upper
8614 // bound on compile-time. Thus, taking an additional compile-time hit here is
8615 // not ideal. However, this should be exceedingly rare since it requires that
8616 // we both exit early from buildTree_rec and that the bundle be out-of-order
8617 // (causing us to iterate all the way to the end of the block).
8618 if (!LastInst)
8619 LastInst = FindLastInst();
8620   assert(LastInst && "Failed to find last instruction in bundle");
8621 return *LastInst;
8622}
8623
8624void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
8625 auto *Front = E->getMainOp();
8626 Instruction *LastInst = EntryToLastInstruction.lookup(E);
8627   assert(LastInst && "Failed to find last instruction in bundle");
8628   // If the instruction is a PHI, set the insert point after all the PHIs.
8629 bool IsPHI = isa<PHINode>(LastInst);
8630 if (IsPHI)
8631 LastInst = LastInst->getParent()->getFirstNonPHI();
8632 if (IsPHI || (E->State != TreeEntry::NeedToGather &&
8633 doesNotNeedToSchedule(E->Scalars))) {
8634 Builder.SetInsertPoint(LastInst);
8635 } else {
8636 // Set the insertion point after the last instruction in the bundle. Set the
8637 // debug location to Front.
8638 Builder.SetInsertPoint(LastInst->getParent(),
8639 std::next(LastInst->getIterator()));
8640 }
8641 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
8642}
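// A short illustration of the PHI special case above (a sketch with
// hypothetical IR, not taken from the original source): when the last bundle
// instruction is a PHI, the insert point is moved past the whole PHI group,
// because IR requires PHIs to stay contiguous at the block head.
// \code
//   %p0 = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]  ; last instruction in bundle
//   %p1 = phi i32 [ %u, %bb0 ], [ %w, %bb1 ]
//   ; getFirstNonPHI() -> vector code is emitted starting here
// \endcode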
8643
8644Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
8645 // List of instructions/lanes from current block and/or the blocks which are
8646 // part of the current loop. These instructions will be inserted at the end to
8647 // make it possible to optimize loops and hoist invariant instructions out of
8648 // the loop's body with better chances for success.
8649 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
8650 SmallSet<int, 4> PostponedIndices;
8651 Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
8652 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
8653 SmallPtrSet<BasicBlock *, 4> Visited;
8654 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
8655 InsertBB = InsertBB->getSinglePredecessor();
8656 return InsertBB && InsertBB == InstBB;
8657 };
8658 for (int I = 0, E = VL.size(); I < E; ++I) {
8659 if (auto *Inst = dyn_cast<Instruction>(VL[I]))
8660 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
8661 getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
8662 PostponedIndices.insert(I).second)
8663 PostponedInsts.emplace_back(Inst, I);
8664 }
8665
8666 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
8667 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
8668 auto *InsElt = dyn_cast<InsertElementInst>(Vec);
8669 if (!InsElt)
8670 return Vec;
8671 GatherShuffleExtractSeq.insert(InsElt);
8672 CSEBlocks.insert(InsElt->getParent());
8673 // Add to our 'need-to-extract' list.
8674 if (TreeEntry *Entry = getTreeEntry(V)) {
8675 // Find which lane we need to extract.
8676 unsigned FoundLane = Entry->findLaneForValue(V);
8677 ExternalUses.emplace_back(V, InsElt, FoundLane);
8678 }
8679 return Vec;
8680 };
8681 Value *Val0 =
8682 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
8683 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
8684 Value *Vec = PoisonValue::get(VecTy);
8685 SmallVector<int> NonConsts;
8686   // Insert constant values first.
8687 for (int I = 0, E = VL.size(); I < E; ++I) {
8688 if (PostponedIndices.contains(I))
8689 continue;
8690 if (!isConstant(VL[I])) {
8691 NonConsts.push_back(I);
8692 continue;
8693 }
8694 Vec = CreateInsertElement(Vec, VL[I], I);
8695 }
8696 // Insert non-constant values.
8697 for (int I : NonConsts)
8698 Vec = CreateInsertElement(Vec, VL[I], I);
8699   // Append instructions that are or may be part of the loop at the end, to
8700   // make it possible to hoist non-loop-based instructions.
8701 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
8702 Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
8703
8704 return Vec;
8705}
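// A minimal sketch of the emission order in gather() above, assuming a
// hypothetical bundle VL = {%a, 7, %b, 9} with no postponed instructions:
// constants are inserted first, non-constants afterwards.
// \code
//   %v0 = insertelement <4 x i32> poison, i32 7, i32 1
//   %v1 = insertelement <4 x i32> %v0, i32 9, i32 3
//   %v2 = insertelement <4 x i32> %v1, i32 %a, i32 0
//   %v3 = insertelement <4 x i32> %v2, i32 %b, i32 2
// \endcode
// Loop-resident scalars from PostponedInsts would be inserted after all of
// these, keeping the constant part of the build-vector hoistable.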
8706
8707/// Merges shuffle masks and emits the final shuffle instruction, if required.
8708/// It supports shuffling of 2 input vectors. It implements lazy shuffle
8709/// emission: the actual shuffle instruction is generated only if it is really
8710/// required. Otherwise, the shuffle instruction emission is delayed till the
8711/// end of the process, to reduce the number of emitted instructions and to
8712/// simplify further analysis/transformations.
8713/// The class will also look through the previously emitted shuffle
8714/// instructions and properly mark indices in the mask as undef.
8715/// For example, given the code
8716/// \code
8717/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
8718/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
8719/// \endcode
8720/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will
8721/// look through %s1 and %s2 and emit
8722/// \code
8723/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
8724/// \endcode
8725/// instead.
8726/// If the 2 operands are of different sizes, the smaller one will be resized
8727/// and the mask recalculated accordingly.
8728/// For example, given the code
8729/// \code
8730/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
8731/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
8732/// \endcode
8733/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will
8734/// look through %s1 and %s2 and emit
8735/// \code
8736/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
8737/// \endcode
8738/// instead.
8739class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
8740 bool IsFinalized = false;
8741 /// Combined mask for all applied operands and masks. It is built during
8742 /// analysis and actual emission of shuffle vector instructions.
8743 SmallVector<int> CommonMask;
8744  /// List of operands for the shuffle vector instruction. It holds at most 2
8745  /// operands; if a 3rd is about to be added, the first 2 are combined into a
8746  /// shuffle with the \p CommonMask mask, the first operand is set to the
8747  /// resulting shuffle and the second operand is set to the newly added
8748  /// operand. The \p CommonMask is transformed accordingly after that.
8749 SmallVector<Value *, 2> InVectors;
8750 IRBuilderBase &Builder;
8751 BoUpSLP &R;
8752
8753 class ShuffleIRBuilder {
8754 IRBuilderBase &Builder;
8755 /// Holds all of the instructions that we gathered.
8756 SetVector<Instruction *> &GatherShuffleExtractSeq;
8757 /// A list of blocks that we are going to CSE.
8758 SetVector<BasicBlock *> &CSEBlocks;
8759
8760 public:
8761 ShuffleIRBuilder(IRBuilderBase &Builder,
8762 SetVector<Instruction *> &GatherShuffleExtractSeq,
8763 SetVector<BasicBlock *> &CSEBlocks)
8764 : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
8765 CSEBlocks(CSEBlocks) {}
8766 ~ShuffleIRBuilder() = default;
8767 /// Creates shufflevector for the 2 operands with the given mask.
8768 Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) {
8769 Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask);
8770 if (auto *I = dyn_cast<Instruction>(Vec)) {
8771 GatherShuffleExtractSeq.insert(I);
8772 CSEBlocks.insert(I->getParent());
8773 }
8774 return Vec;
8775 }
8776    /// Creates a permutation of the single vector operand with the given mask,
8777    /// if it is not an identity mask.
8778 Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) {
8779 if (Mask.empty())
8780 return V1;
8781 unsigned VF = Mask.size();
8782 unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements();
8783 if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask))
8784 return V1;
8785 Value *Vec = Builder.CreateShuffleVector(V1, Mask);
8786 if (auto *I = dyn_cast<Instruction>(Vec)) {
8787 GatherShuffleExtractSeq.insert(I);
8788 CSEBlocks.insert(I->getParent());
8789 }
8790 return Vec;
8791 }
8792    /// Resizes the 2 input vectors to matching sizes, if they are not equal
8793    /// yet. The smaller vector is resized to the size of the larger one.
8794 void resizeToMatch(Value *&V1, Value *&V2) {
8795 if (V1->getType() == V2->getType())
8796 return;
8797 int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements();
8798 int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements();
8799 int VF = std::max(V1VF, V2VF);
8800 int MinVF = std::min(V1VF, V2VF);
8801 SmallVector<int> IdentityMask(VF, UndefMaskElem);
8802 std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF),
8803 0);
8804 Value *&Op = MinVF == V1VF ? V1 : V2;
8805 Op = Builder.CreateShuffleVector(Op, IdentityMask);
8806 if (auto *I = dyn_cast<Instruction>(Op)) {
8807 GatherShuffleExtractSeq.insert(I);
8808 CSEBlocks.insert(I->getParent());
8809 }
8810 if (MinVF == V1VF)
8811 V1 = Op;
8812 else
8813 V2 = Op;
8814 }
8815 };
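// A worked example for resizeToMatch() above (hypothetical operands, for
// illustration only): with V1 of type <2 x i32> and V2 of type <4 x i32>, the
// smaller operand is widened with an identity mask padded by undefs, e.g.
// \code
//   %v1.resized = shufflevector <2 x i32> %v1, <2 x i32> poison,
//                               <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
// \endcode
// after which both operands can feed a single 4-element shuffle.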
8816
8817 /// Smart shuffle instruction emission, walks through shuffles trees and
8818 /// tries to find the best matching vector for the actual shuffle
8819 /// instruction.
8820 Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) {
8821     assert(V1 && "Expected at least one vector value.");
8822 ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq,
8823 R.CSEBlocks);
8824 return BaseShuffleAnalysis::createShuffle(V1, V2, Mask, ShuffleBuilder);
8825 }
8826
8827  /// Transforms the mask \p CommonMask according to the given \p Mask so that
8828  /// it is properly set after shuffle emission.
8829 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask,
8830 ArrayRef<int> Mask) {
8831 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
8832 if (Mask[Idx] != UndefMaskElem)
8833 CommonMask[Idx] = Idx;
8834 }
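// For example (a sketch with made-up masks): if CommonMask = {3, UndefMaskElem,
// 1, 0} was just materialized by a shuffle and the incoming Mask =
// {0, UndefMaskElem, 2, 3}, every lane that Mask defines is redirected to the
// same lane of the freshly emitted shuffle, giving
// CommonMask = {0, UndefMaskElem, 2, 3}.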
8835
8836public:
8837 ShuffleInstructionBuilder(IRBuilderBase &Builder, BoUpSLP &R)
8838 : Builder(Builder), R(R) {}
8839
8840 /// Adds 2 input vectors and the mask for their shuffling.
8841 void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
8842     assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors.");
8843 if (InVectors.empty()) {
8844 InVectors.push_back(V1);
8845 InVectors.push_back(V2);
8846 CommonMask.assign(Mask.begin(), Mask.end());
8847 return;
8848 }
8849 Value *Vec = InVectors.front();
8850 if (InVectors.size() == 2) {
8851 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
8852 transformMaskAfterShuffle(CommonMask, Mask);
8853 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() !=
8854 Mask.size()) {
8855 Vec = createShuffle(Vec, nullptr, CommonMask);
8856 transformMaskAfterShuffle(CommonMask, Mask);
8857 }
8858 V1 = createShuffle(V1, V2, Mask);
8859 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
8860 if (Mask[Idx] != UndefMaskElem)
8861 CommonMask[Idx] = Idx + Sz;
8862 InVectors.front() = Vec;
8863 if (InVectors.size() == 2)
8864 InVectors.back() = V1;
8865 else
8866 InVectors.push_back(V1);
8867 }
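// Illustration of the laziness in add() above (hypothetical names %a, %b, M1):
// the first call only records the pair, i.e. add(%a, %b, M1) leaves
// InVectors = {%a, %b} and CommonMask = M1 with no IR emitted. A later call
// first folds %a/%b into one shuffle, remaps CommonMask onto its result, and
// stores the shuffle of the new pair as the second operand, so at most 2
// operands are ever live.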
8868  /// Adds one more input vector and the mask for its shuffling.
8869 void add(Value *V1, ArrayRef<int> Mask) {
8870 if (InVectors.empty()) {
8871 if (!isa<FixedVectorType>(V1->getType())) {
8872 V1 = createShuffle(V1, nullptr, CommonMask);
8873 CommonMask.assign(Mask.size(), UndefMaskElem);
8874 transformMaskAfterShuffle(CommonMask, Mask);
8875 }
8876 InVectors.push_back(V1);
8877 CommonMask.assign(Mask.begin(), Mask.end());
8878 return;
8879 }
8880 const auto *It = find(InVectors, V1);
8881 if (It == InVectors.end()) {
8882 if (InVectors.size() == 2 ||
8883 InVectors.front()->getType() != V1->getType() ||
8884 !isa<FixedVectorType>(V1->getType())) {
8885 Value *V = InVectors.front();
8886 if (InVectors.size() == 2) {
8887 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask);
8888 transformMaskAfterShuffle(CommonMask, CommonMask);
8889 } else if (cast<FixedVectorType>(V->getType())->getNumElements() !=
8890 CommonMask.size()) {
8891 V = createShuffle(InVectors.front(), nullptr, CommonMask);
8892 transformMaskAfterShuffle(CommonMask, CommonMask);
8893 }
8894 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
8895 if (CommonMask[Idx] == UndefMaskElem && Mask[Idx] != UndefMaskElem)
8896 CommonMask[Idx] =
8897 V->getType() != V1->getType()
8898 ? Idx + Sz
8899 : Mask[Idx] + cast<FixedVectorType>(V1->getType())
8900 ->getNumElements();
8901 if (V->getType() != V1->getType())
8902 V1 = createShuffle(V1, nullptr, Mask);
8903 InVectors.front() = V;
8904 if (InVectors.size() == 2)
8905 InVectors.back() = V1;
8906 else
8907 InVectors.push_back(V1);
8908 return;
8909 }
8910      // Check if the second vector is required at all: it is only added if
8911      // the mask uses elements not already taken from the first one.
8912 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
8913 if (Mask[Idx] != UndefMaskElem && CommonMask[Idx] == UndefMaskElem) {
8914 InVectors.push_back(V1);
8915 break;
8916 }
8917 }
8918 int VF = CommonMask.size();
8919 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType()))
8920 VF = FTy->getNumElements();
8921 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
8922 if (Mask[Idx] != UndefMaskElem && CommonMask[Idx] == UndefMaskElem)
8923 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF);
8924 }
8925  /// Adds one more input vector and the mask for its shuffling.
8926 void addOrdered(Value *V1, ArrayRef<unsigned> Order) {
8927 SmallVector<int> NewMask;
8928 inversePermutation(Order, NewMask);
8929 add(V1, NewMask);
8930 }
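// A worked example for addOrdered() (assuming inversePermutation fills
// NewMask[Order[I]] = I, as elsewhere in this file): Order = {2, 0, 1} yields
// NewMask = {1, 2, 0}, which is then handled by the generic add(V1, Mask)
// path above.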
8931 /// Finalize emission of the shuffles.
8932 Value *
8933 finalize(ArrayRef<int> ExtMask = std::nullopt) {
8934 IsFinalized = true;
8935 if (!ExtMask.empty()) {
8936 if (CommonMask.empty()) {
8937 CommonMask.assign(ExtMask.begin(), ExtMask.end());
8938 } else {
8939 SmallVector<int> NewMask(ExtMask.size(), UndefMaskElem);
8940 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
8941 if (ExtMask[I] == UndefMaskElem)
8942 continue;
8943 NewMask[I] = CommonMask[ExtMask[I]];
8944 }
8945 CommonMask.swap(NewMask);
8946 }
8947 }
8948 if (CommonMask.empty()) {
8949       assert(InVectors.size() == 1 && "Expected only one vector with no mask");
8950 return InVectors.front();
8951 }
8952 if (InVectors.size() == 2)
8953 return createShuffle(InVectors.front(), InVectors.back(), CommonMask);
8954 return createShuffle(InVectors.front(), nullptr, CommonMask);
8955 }
8956
8957 ~ShuffleInstructionBuilder() {
8958     assert((IsFinalized || CommonMask.empty()) &&
8959            "Shuffle construction must be finalized.");
8960 }
8961};
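// A minimal usage sketch for the builder above, mirroring how it is driven by
// the vectorization code later in this file (only valid inside a BoUpSLP
// member function, where Builder and *this are available):
// \code
//   ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
//   ShuffleBuilder.add(Vec, Mask);           // lazily records operand + mask
//   Value *Res = ShuffleBuilder.finalize(ReuseShuffleIndices);
//   // finalize() emits the shufflevector only if a non-trivial mask remains.
// \endcode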
8962
8963Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx) {
8964 ArrayRef<Value *> VL = E->getOperand(NodeIdx);
8965 const unsigned VF = VL.size();
8966 InstructionsState S = getSameOpcode(VL, *TLI);
8967   // Special processing for a GEP bundle, which may include non-GEP values.
8968 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
8969 const auto *It =
8970 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
8971 if (It != VL.end())
8972 S = getSameOpcode(*It, *TLI);
8973 }
8974 if (S.getOpcode()) {
8975 if (TreeEntry *VE = getTreeEntry(S.OpValue);
8976 VE && VE->isSame(VL) &&
8977 (any_of(VE->UserTreeIndices,
8978 [E, NodeIdx](const EdgeInfo &EI) {
8979 return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
8980 }) ||
8981 any_of(VectorizableTree,
8982 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) {
8983 return TE->isOperandGatherNode({E, NodeIdx}) &&
8984 VE->isSame(TE->Scalars);
8985 }))) {
8986 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) {
8987 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
8988 ShuffleBuilder.add(V, Mask);
8989 return ShuffleBuilder.finalize(std::nullopt);
8990 };
8991 Value *V = vectorizeTree(VE);
8992 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
8993 if (!VE->ReuseShuffleIndices.empty()) {
8994 // Reshuffle to get only unique values.
8995 // If some of the scalars are duplicated in the vectorization
8996 // tree entry, we do not vectorize them but instead generate a
8997 // mask for the reuses. But if there are several users of the
8998 // same entry, they may have different vectorization factors.
8999 // This is especially important for PHI nodes. In this case, we
9000 // need to adapt the resulting instruction for the user
9001 // vectorization factor and have to reshuffle it again to take
9002 // only unique elements of the vector. Without this code the
9003           // function incorrectly returns a reduced vector instruction with
9004 // the same elements, not with the unique ones.
9005
9006 // block:
9007 // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
9008 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0>
9009 // ... (use %2)
9010 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0}
9011 // br %block
9012 SmallVector<int> UniqueIdxs(VF, UndefMaskElem);
9013 SmallSet<int, 4> UsedIdxs;
9014 int Pos = 0;
9015 for (int Idx : VE->ReuseShuffleIndices) {
9016 if (Idx != static_cast<int>(VF) && Idx != UndefMaskElem &&
9017 UsedIdxs.insert(Idx).second)
9018 UniqueIdxs[Idx] = Pos;
9019 ++Pos;
9020 }
9021           assert(VF >= UsedIdxs.size() && "Expected vectorization factor "
9022                                           "less than original vector size.");
9023 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
9024 V = FinalShuffle(V, UniqueIdxs);
9025 } else {
9026           assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
9027                  "Expected vectorization factor less "
9028                  "than original vector size.");
9029 SmallVector<int> UniformMask(VF, 0);
9030 std::iota(UniformMask.begin(), UniformMask.end(), 0);
9031 V = FinalShuffle(V, UniformMask);
9032 }
9033 }
9034 return V;
9035 }
9036 }
9037
9038 // Find the corresponding gather entry and vectorize it.
9039   // This allows us to be more accurate with tree/graph transformations and,
9040   // in many cases, checks the correctness of those transformations.
9041 auto *I = find_if(VectorizableTree,
9042 [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) {
9043 return TE->isOperandGatherNode({E, NodeIdx});
9044 });
9045   assert(I != VectorizableTree.end() && "Gather node is not in the graph.");
9046   assert(I->get()->UserTreeIndices.size() == 1 &&
9047          "Expected only single user for the gather node.");
9048   assert(I->get()->isSame(VL) && "Expected same list of scalars.");
9049 IRBuilder<>::InsertPointGuard Guard(Builder);
9050 if (E->getOpcode() != Instruction::InsertElement &&
9051 E->getOpcode() != Instruction::PHI) {
9052 Instruction *LastInst = EntryToLastInstruction.lookup(E);
9053     assert(LastInst && "Failed to find last instruction in bundle");
9054 Builder.SetInsertPoint(LastInst);
9055 }
9056 return vectorizeTree(I->get());
9057}
9058
9059Value *BoUpSLP::createBuildVector(const TreeEntry *E) {
9060   assert(E->State == TreeEntry::NeedToGather && "Expected gather node.");
9061 unsigned VF = E->getVectorFactor();
9062
9063 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
9064 SmallVector<Value *> Gathered(
9065 VF, PoisonValue::get(E->Scalars.front()->getType()));
9066 bool NeedFreeze = false;
9067 SmallVector<Value *> VL(E->Scalars.begin(), E->Scalars.end());
9068   // Build a mask out of the reorder indices and reorder scalars per this mask.
9069 SmallVector<int> ReorderMask;
9070 inversePermutation(E->ReorderIndices, ReorderMask);
9071 if (!ReorderMask.empty())
9072 reorderScalars(VL, ReorderMask);
9073 SmallVector<int> ReuseMask(VF, UndefMaskElem);
9074 if (!allConstant(VL)) {
9075     // For splats we can emit broadcasts instead of gathers, so try to find
9076 // such sequences.
9077 bool IsSplat = isSplat(VL) && (VL.size() > 2 || VL.front() == VL.back());
9078 SmallVector<int> UndefPos;
9079 DenseMap<Value *, unsigned> UniquePositions;
9080 // Gather unique non-const values and all constant values.
9081 // For repeated values, just shuffle them.
9082 for (auto [I, V] : enumerate(VL)) {
9083 if (isa<UndefValue>(V)) {
9084 if (!isa<PoisonValue>(V)) {
9085 Gathered[I] = V;
9086 ReuseMask[I] = I;
9087 UndefPos.push_back(I);
9088 }
9089 continue;
9090 }
9091 if (isConstant(V)) {
9092 Gathered[I] = V;
9093 ReuseMask[I] = I;
9094 continue;
9095 }
9096 if (IsSplat) {
9097 Gathered.front() = V;
9098 ReuseMask[I] = 0;
9099 } else {
9100 const auto Res = UniquePositions.try_emplace(V, I);
9101 Gathered[Res.first->second] = V;
9102 ReuseMask[I] = Res.first->second;
9103 }
9104 }
9105 if (!UndefPos.empty() && IsSplat) {
9106 // For undef values, try to replace them with the simple broadcast.
9107 // We can do it if the broadcasted value is guaranteed to be
9108 // non-poisonous, or by freezing the incoming scalar value first.
9109 auto *It = find_if(Gathered, [this, E](Value *V) {
9110 return !isa<UndefValue>(V) &&
9111 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) ||
9112 any_of(V->uses(), [E](const Use &U) {
9113                    // Check if the value is already used in the same
9114                    // operation in one of the nodes.
9115 return E->UserTreeIndices.size() == 1 &&
9116 is_contained(
9117 E->UserTreeIndices.front().UserTE->Scalars,
9118 U.getUser()) &&
9119 E->UserTreeIndices.front().EdgeIdx != U.getOperandNo();
9120 }));
9121 });
9122 if (It != Gathered.end()) {
9123 // Replace undefs by the non-poisoned scalars and emit broadcast.
9124 int Pos = std::distance(Gathered.begin(), It);
9125 for_each(UndefPos, [&](int I) {
9126 // Set the undef position to the non-poisoned scalar.
9127 ReuseMask[I] = Pos;
9128             // Replace the undef by poison; in the mask it is already replaced by the non-poisoned scalar.
9129 if (I != Pos)
9130 Gathered[I] = PoisonValue::get(Gathered[I]->getType());
9131 });
9132 } else {
9133         // Replace undefs with poison, emit the broadcast and then emit a
9134         // freeze.
9135 for_each(UndefPos, [&](int I) {
9136 ReuseMask[I] = UndefMaskElem;
9137 if (isa<UndefValue>(Gathered[I]))
9138 Gathered[I] = PoisonValue::get(Gathered[I]->getType());
9139 });
9140 NeedFreeze = true;
9141 }
9142 }
9143 } else {
9144 ReuseMask.clear();
9145 copy(VL, Gathered.begin());
9146 }
9147 // Gather unique scalars and all constants.
9148 Value *Vec = gather(Gathered);
9149 ShuffleBuilder.add(Vec, ReuseMask);
9150 Vec = ShuffleBuilder.finalize(E->ReuseShuffleIndices);
9151 if (NeedFreeze)
9152 Vec = Builder.CreateFreeze(Vec);
9153 return Vec;
9154}
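// Splat-path sketch for createBuildVector() above (hypothetical scalars): for
// VL = {%x, %x, undef, %x}, %x lands in Gathered[0] and ReuseMask starts as
// {0, 0, 2, 0}. If %x is known not to be poison, the undef lane is simply
// redirected to lane 0; otherwise the lane is left undef in the mask and the
// broadcast result is wrapped in a freeze, as implemented above.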
9155
9156Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
9157 IRBuilder<>::InsertPointGuard Guard(Builder);
9158
9159 if (E->VectorizedValue) {
9160     LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
9161 return E->VectorizedValue;
9162 }
9163
9164 auto FinalShuffle = [&](Value *V, const TreeEntry *E) {
9165 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
9166 if (E->State != TreeEntry::NeedToGather &&
9167 E->getOpcode() == Instruction::Store) {
9168 ArrayRef<int> Mask =
9169 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()),
9170 E->ReorderIndices.size());
9171 ShuffleBuilder.add(V, Mask);
9172 } else {
9173 ShuffleBuilder.addOrdered(V, E->ReorderIndices);
9174 }
9175 return ShuffleBuilder.finalize(E->ReuseShuffleIndices);
9176 };
9177
9178 if (E->State == TreeEntry::NeedToGather) {
9179 if (E->Idx > 0) {
9180 // We are in the middle of a vectorizable chain. We need to gather the
9181 // scalars from the users.
9182 Value *Vec = createBuildVector(E);
9183 E->VectorizedValue = Vec;
9184 return Vec;
9185 }
9186 if (E->getMainOp())
9187 setInsertPointAfterBundle(E);
9188 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
9189 // Build a mask out of the reorder indices and reorder scalars per this
9190 // mask.
9191 SmallVector<int> ReorderMask;
9192 inversePermutation(E->ReorderIndices, ReorderMask);
9193 if (!ReorderMask.empty())
9194 reorderScalars(GatheredScalars, ReorderMask);
9195 Value *Vec;
9196 SmallVector<int> Mask;
9197 SmallVector<const TreeEntry *> Entries;
9198 std::optional<TargetTransformInfo::ShuffleKind> Shuffle =
9199 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries);
9200 if (Shuffle) {
9201       assert((Entries.size() == 1 || Entries.size() == 2) &&
9202              "Expected shuffle of 1 or 2 entries.");
9203 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
9204 Entries.back()->VectorizedValue, Mask);
9205 if (auto *I = dyn_cast<Instruction>(Vec)) {
9206 GatherShuffleExtractSeq.insert(I);
9207 CSEBlocks.insert(I->getParent());
9208 }
9209 } else {
9210 Vec = gather(E->Scalars);
9211 }
9212 Vec = FinalShuffle(Vec, E);
9213 E->VectorizedValue = Vec;
9214 return Vec;
9215 }
9216
9217   assert((E->State == TreeEntry::Vectorize ||
9218           E->State == TreeEntry::ScatterVectorize) &&
9219          "Unhandled state");
9220 unsigned ShuffleOrOp =
9221 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
9222 Instruction *VL0 = E->getMainOp();
9223 Type *ScalarTy = VL0->getType();
9224 if (auto *Store = dyn_cast<StoreInst>(VL0))
9225 ScalarTy = Store->getValueOperand()->getType();
9226 else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
9227 ScalarTy = IE->getOperand(1)->getType();
9228 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
9229 switch (ShuffleOrOp) {
9230 case Instruction::PHI: {
9231     assert((E->ReorderIndices.empty() ||
9232             E != VectorizableTree.front().get() ||
9233             !E->UserTreeIndices.empty()) &&
9234            "PHI reordering is free.");
9235 auto *PH = cast<PHINode>(VL0);
9236 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
9237 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
9238 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
9239 Value *V = NewPhi;
9240
9241 // Adjust insertion point once all PHI's have been generated.
9242 Builder.SetInsertPoint(&*PH->getParent()->getFirstInsertionPt());
9243 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
9244
9245 V = FinalShuffle(V, E);
9246
9247 E->VectorizedValue = V;
9248
9249 // PHINodes may have multiple entries from the same block. We want to
9250 // visit every block once.
9251 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
9252
9253 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
9254 ValueList Operands;
9255 BasicBlock *IBB = PH->getIncomingBlock(i);
9256
9257 // Stop emission if all incoming values are generated.
9258 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) {
9259         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9260 return V;
9261 }
9262
9263 if (!VisitedBBs.insert(IBB).second) {
9264 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
9265 continue;
9266 }
9267
9268 Builder.SetInsertPoint(IBB->getTerminator());
9269 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
9270 Value *Vec = vectorizeOperand(E, i);
9271 NewPhi->addIncoming(Vec, IBB);
9272 }
9273
9274     assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
9275            "Invalid number of incoming values");
9276 return V;
9277 }
9278
9279 case Instruction::ExtractElement: {
9280 Value *V = E->getSingleOperand(0);
9281 setInsertPointAfterBundle(E);
9282 V = FinalShuffle(V, E);
9283 E->VectorizedValue = V;
9284 return V;
9285 }
9286 case Instruction::ExtractValue: {
9287 auto *LI = cast<LoadInst>(E->getSingleOperand(0));
9288 Builder.SetInsertPoint(LI);
9289 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
9290 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
9291 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
9292 Value *NewV = propagateMetadata(V, E->Scalars);
9293 NewV = FinalShuffle(NewV, E);
9294 E->VectorizedValue = NewV;
9295 return NewV;
9296 }
9297 case Instruction::InsertElement: {
9298     assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
9299 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
9300 Value *V = vectorizeOperand(E, 1);
9301
9302 // Create InsertVector shuffle if necessary
9303 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
9304 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
9305 }));
9306 const unsigned NumElts =
9307 cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
9308 const unsigned NumScalars = E->Scalars.size();
9309
9310 unsigned Offset = *getInsertIndex(VL0);
9311     assert(Offset < NumElts && "Failed to find vector index offset");
9312
9313 // Create shuffle to resize vector
9314 SmallVector<int> Mask;
9315 if (!E->ReorderIndices.empty()) {
9316 inversePermutation(E->ReorderIndices, Mask);
9317 Mask.append(NumElts - NumScalars, UndefMaskElem);
9318 } else {
9319 Mask.assign(NumElts, UndefMaskElem);
9320 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
9321 }
9322 // Create InsertVector shuffle if necessary
9323 bool IsIdentity = true;
9324 SmallVector<int> PrevMask(NumElts, UndefMaskElem);
9325 Mask.swap(PrevMask);
9326 for (unsigned I = 0; I < NumScalars; ++I) {
9327 Value *Scalar = E->Scalars[PrevMask[I]];
9328 unsigned InsertIdx = *getInsertIndex(Scalar);
9329 IsIdentity &= InsertIdx - Offset == I;
9330 Mask[InsertIdx - Offset] = I;
9331 }
9332 if (!IsIdentity || NumElts != NumScalars) {
9333 V = Builder.CreateShuffleVector(V, Mask);
9334 if (auto *I = dyn_cast<Instruction>(V)) {
9335 GatherShuffleExtractSeq.insert(I);
9336 CSEBlocks.insert(I->getParent());
9337 }
9338 }
9339
9340 SmallVector<int> InsertMask(NumElts, UndefMaskElem);
9341 for (unsigned I = 0; I < NumElts; I++) {
9342 if (Mask[I] != UndefMaskElem)
9343 InsertMask[Offset + I] = I;
9344 }
9345 SmallBitVector UseMask =
9346 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask);
9347 SmallBitVector IsFirstUndef =
9348 isUndefVector(FirstInsert->getOperand(0), UseMask);
9349 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) &&
9350 NumElts != NumScalars) {
9351 if (IsFirstUndef.all()) {
9352 if (!ShuffleVectorInst::isIdentityMask(InsertMask)) {
9353 SmallBitVector IsFirstPoison =
9354 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
9355 if (!IsFirstPoison.all()) {
9356 for (unsigned I = 0; I < NumElts; I++) {
9357 if (InsertMask[I] == UndefMaskElem && !IsFirstPoison.test(I))
9358 InsertMask[I] = I + NumElts;
9359 }
9360 }
9361 V = Builder.CreateShuffleVector(
9362 V,
9363 IsFirstPoison.all() ? PoisonValue::get(V->getType())
9364 : FirstInsert->getOperand(0),
9365 InsertMask, cast<Instruction>(E->Scalars.back())->getName());
9366 if (auto *I = dyn_cast<Instruction>(V)) {
9367 GatherShuffleExtractSeq.insert(I);
9368 CSEBlocks.insert(I->getParent());
9369 }
9370 }
9371 } else {
9372 SmallBitVector IsFirstPoison =
9373 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
9374 for (unsigned I = 0; I < NumElts; I++) {
9375 if (InsertMask[I] == UndefMaskElem)
9376 InsertMask[I] = IsFirstPoison.test(I) ? UndefMaskElem : I;
9377 else
9378 InsertMask[I] += NumElts;
9379 }
9380 V = Builder.CreateShuffleVector(
9381 FirstInsert->getOperand(0), V, InsertMask,
9382 cast<Instruction>(E->Scalars.back())->getName());
9383 if (auto *I = dyn_cast<Instruction>(V)) {
9384 GatherShuffleExtractSeq.insert(I);
9385 CSEBlocks.insert(I->getParent());
9386 }
9387 }
9388 }
9389
9390 ++NumVectorInstructions;
9391 E->VectorizedValue = V;
9392 return V;
9393 }
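// A sketch of the mask algebra in the InsertElement case above (hypothetical
// values): with NumScalars = 2 inserts landing at vector indices 4 and 5 of an
// 8-element destination, Offset = 4, Mask = {0, 1} after remapping, and
// InsertMask = {U, U, U, U, 0, 1, U, U} (U = UndefMaskElem), so the final
// shuffle copies the freshly built lanes over the original vector's lanes.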
9394 case Instruction::ZExt:
9395 case Instruction::SExt:
9396 case Instruction::FPToUI:
9397 case Instruction::FPToSI:
9398 case Instruction::FPExt:
9399 case Instruction::PtrToInt:
9400 case Instruction::IntToPtr:
9401 case Instruction::SIToFP:
9402 case Instruction::UIToFP:
9403 case Instruction::Trunc:
9404 case Instruction::FPTrunc:
9405 case Instruction::BitCast: {
9406 setInsertPointAfterBundle(E);
9407
9408 Value *InVec = vectorizeOperand(E, 0);
9409 if (E->VectorizedValue) {
9410       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9411 return E->VectorizedValue;
9412 }
9413
9414 auto *CI = cast<CastInst>(VL0);
9415 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
9416 V = FinalShuffle(V, E);
9417
9418 E->VectorizedValue = V;
9419 ++NumVectorInstructions;
9420 return V;
9421 }
9422 case Instruction::FCmp:
9423 case Instruction::ICmp: {
9424 setInsertPointAfterBundle(E);
9425
9426 Value *L = vectorizeOperand(E, 0);
9427 if (E->VectorizedValue) {
9428       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9429 return E->VectorizedValue;
9430 }
9431 Value *R = vectorizeOperand(E, 1);
9432 if (E->VectorizedValue) {
9433       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9434 return E->VectorizedValue;
9435 }
9436
9437 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
9438 Value *V = Builder.CreateCmp(P0, L, R);
9439 propagateIRFlags(V, E->Scalars, VL0);
9440 V = FinalShuffle(V, E);
9441
9442 E->VectorizedValue = V;
9443 ++NumVectorInstructions;
9444 return V;
9445 }
9446 case Instruction::Select: {
9447 setInsertPointAfterBundle(E);
9448
9449 Value *Cond = vectorizeOperand(E, 0);
9450 if (E->VectorizedValue) {
9451       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9452 return E->VectorizedValue;
9453 }
9454 Value *True = vectorizeOperand(E, 1);
9455 if (E->VectorizedValue) {
9456       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9457 return E->VectorizedValue;
9458 }
9459 Value *False = vectorizeOperand(E, 2);
9460 if (E->VectorizedValue) {
9461       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9462 return E->VectorizedValue;
9463 }
9464
9465 Value *V = Builder.CreateSelect(Cond, True, False);
9466 V = FinalShuffle(V, E);
9467
9468 E->VectorizedValue = V;
9469 ++NumVectorInstructions;
9470 return V;
9471 }
9472 case Instruction::FNeg: {
9473 setInsertPointAfterBundle(E);
9474
9475 Value *Op = vectorizeOperand(E, 0);
9476
9477 if (E->VectorizedValue) {
9478       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9479 return E->VectorizedValue;
9480 }
9481
9482 Value *V = Builder.CreateUnOp(
9483 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
9484 propagateIRFlags(V, E->Scalars, VL0);
9485 if (auto *I = dyn_cast<Instruction>(V))
9486 V = propagateMetadata(I, E->Scalars);
9487
9488 V = FinalShuffle(V, E);
9489
9490 E->VectorizedValue = V;
9491 ++NumVectorInstructions;
9492
9493 return V;
9494 }
9495 case Instruction::Add:
9496 case Instruction::FAdd:
9497 case Instruction::Sub:
9498 case Instruction::FSub:
9499 case Instruction::Mul:
9500 case Instruction::FMul:
9501 case Instruction::UDiv:
9502 case Instruction::SDiv:
9503 case Instruction::FDiv:
9504 case Instruction::URem:
9505 case Instruction::SRem:
9506 case Instruction::FRem:
9507 case Instruction::Shl:
9508 case Instruction::LShr:
9509 case Instruction::AShr:
9510 case Instruction::And:
9511 case Instruction::Or:
9512 case Instruction::Xor: {
9513 setInsertPointAfterBundle(E);
9514
9515 Value *LHS = vectorizeOperand(E, 0);
9516 if (E->VectorizedValue) {
9517       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9518 return E->VectorizedValue;
9519 }
9520 Value *RHS = vectorizeOperand(E, 1);
9521 if (E->VectorizedValue) {
9522       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9523 return E->VectorizedValue;
9524 }
9525
9526 Value *V = Builder.CreateBinOp(
9527 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
9528 RHS);
9529 propagateIRFlags(V, E->Scalars, VL0);
9530 if (auto *I = dyn_cast<Instruction>(V))
9531 V = propagateMetadata(I, E->Scalars);
9532
9533 V = FinalShuffle(V, E);
9534
9535 E->VectorizedValue = V;
9536 ++NumVectorInstructions;
9537
9538 return V;
9539 }
9540 case Instruction::Load: {
9541 // Loads are inserted at the head of the tree because we don't want to
9542 // sink them all the way down past store instructions.
9543 setInsertPointAfterBundle(E);
9544
9545 LoadInst *LI = cast<LoadInst>(VL0);
9546 Instruction *NewLI;
9547 unsigned AS = LI->getPointerAddressSpace();
9548 Value *PO = LI->getPointerOperand();
9549 if (E->State == TreeEntry::Vectorize) {
9550 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
9551 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
9552
9553 // The pointer operand uses an in-tree scalar so we add the new BitCast
9554         // or LoadInst to the ExternalUses list to make sure that an extract will
9555 // be generated in the future.
9556 if (TreeEntry *Entry = getTreeEntry(PO)) {
9557 // Find which lane we need to extract.
9558 unsigned FoundLane = Entry->findLaneForValue(PO);
9559 ExternalUses.emplace_back(
9560 PO, PO != VecPtr ? cast<User>(VecPtr) : NewLI, FoundLane);
9561 }
9562 } else {
9563       assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
9564 Value *VecPtr = vectorizeOperand(E, 0);
9565 if (E->VectorizedValue) {
9566         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9567 return E->VectorizedValue;
9568 }
9569 // Use the minimum alignment of the gathered loads.
9570 Align CommonAlignment = LI->getAlign();
9571 for (Value *V : E->Scalars)
9572 CommonAlignment =
9573 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
9574 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
9575 }
9576 Value *V = propagateMetadata(NewLI, E->Scalars);
9577
9578 V = FinalShuffle(V, E);
9579 E->VectorizedValue = V;
9580 ++NumVectorInstructions;
9581 return V;
9582 }
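// Load-case sketch (hypothetical IR, for illustration only): a
// TreeEntry::Vectorize bundle of 4 consecutive i32 loads becomes
// \code
//   %vp = bitcast i32* %p to <4 x i32>*
//   %wide = load <4 x i32>, <4 x i32>* %vp, align 4
// \endcode
// while a ScatterVectorize bundle is emitted as an llvm.masked.gather over a
// vector of pointers, using the minimum alignment of the scalar loads.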
9583 case Instruction::Store: {
9584 auto *SI = cast<StoreInst>(VL0);
9585 unsigned AS = SI->getPointerAddressSpace();
9586
9587 setInsertPointAfterBundle(E);
9588
9589 Value *VecValue = vectorizeOperand(E, 0);
9590 VecValue = FinalShuffle(VecValue, E);
9591
9592 Value *ScalarPtr = SI->getPointerOperand();
9593 Value *VecPtr = Builder.CreateBitCast(
9594 ScalarPtr, VecValue->getType()->getPointerTo(AS));
9595 StoreInst *ST =
9596 Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign());
9597
9598 // The pointer operand uses an in-tree scalar, so add the new BitCast or
9599 // StoreInst to ExternalUses to make sure that an extract will be
9600 // generated in the future.
9601 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
9602 // Find which lane we need to extract.
9603 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
9604 ExternalUses.push_back(ExternalUser(
9605 ScalarPtr, ScalarPtr != VecPtr ? cast<User>(VecPtr) : ST,
9606 FoundLane));
9607 }
9608
9609 Value *V = propagateMetadata(ST, E->Scalars);
9610
9611 E->VectorizedValue = V;
9612 ++NumVectorInstructions;
9613 return V;
9614 }
9615 case Instruction::GetElementPtr: {
9616 auto *GEP0 = cast<GetElementPtrInst>(VL0);
9617 setInsertPointAfterBundle(E);
9618
9619 Value *Op0 = vectorizeOperand(E, 0);
9620 if (E->VectorizedValue) {
9621       LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9622 return E->VectorizedValue;
9623 }
9624
9625 SmallVector<Value *> OpVecs;
9626 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) {
9627 Value *OpVec = vectorizeOperand(E, J);
9628 if (E->VectorizedValue) {
9629         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9630 return E->VectorizedValue;
9631 }
9632 OpVecs.push_back(OpVec);
9633 }
9634
9635 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs);
9636 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) {
9637 SmallVector<Value *> GEPs;
9638 for (Value *V : E->Scalars) {
9639 if (isa<GetElementPtrInst>(V))
9640 GEPs.push_back(V);
9641 }
9642 V = propagateMetadata(I, GEPs);
9643 }
9644
9645 V = FinalShuffle(V, E);
9646
9647 E->VectorizedValue = V;
9648 ++NumVectorInstructions;
9649
9650 return V;
9651 }
9652 case Instruction::Call: {
9653 CallInst *CI = cast<CallInst>(VL0);
9654 setInsertPointAfterBundle(E);
9655
9656 Intrinsic::ID IID = Intrinsic::not_intrinsic;
9657 if (Function *FI = CI->getCalledFunction())
9658 IID = FI->getIntrinsicID();
9659
9660 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
9661
9662 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
9663 bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
9664 VecCallCosts.first <= VecCallCosts.second;
9665
9666 Value *ScalarArg = nullptr;
9667 std::vector<Value *> OpVecs;
9668 SmallVector<Type *, 2> TysForDecl =
9669 {FixedVectorType::get(CI->getType(), E->Scalars.size())};
9670 for (int j = 0, e = CI->arg_size(); j < e; ++j) {
9671 ValueList OpVL;
9672       // Some intrinsics have scalar arguments. Such arguments should not be
9673 // vectorized.
9674 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(IID, j)) {
9675 CallInst *CEI = cast<CallInst>(VL0);
9676 ScalarArg = CEI->getArgOperand(j);
9677 OpVecs.push_back(CEI->getArgOperand(j));
9678 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j))
9679 TysForDecl.push_back(ScalarArg->getType());
9680 continue;
9681 }
9682
9683 Value *OpVec = vectorizeOperand(E, j);
9684 if (E->VectorizedValue) {
9685         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9686 return E->VectorizedValue;
9687 }
9688       LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
9689 OpVecs.push_back(OpVec);
9690 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j))
9691 TysForDecl.push_back(OpVec->getType());
9692 }
9693
9694 Function *CF;
9695 if (!UseIntrinsic) {
9696 VFShape Shape =
9697 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
9698 VecTy->getNumElements())),
9699 false /*HasGlobalPred*/);
9700 CF = VFDatabase(*CI).getVectorizedFunction(Shape);
9701 } else {
9702 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
9703 }
9704
9705 SmallVector<OperandBundleDef, 1> OpBundles;
9706 CI->getOperandBundlesAsDefs(OpBundles);
9707 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
9708
9709 // The scalar argument uses an in-tree scalar so we add the new vectorized
9710     // call to the ExternalUses list to make sure that an extract will be
9711 // generated in the future.
9712 if (ScalarArg) {
9713 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
9714 // Find which lane we need to extract.
9715 unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
9716 ExternalUses.push_back(
9717 ExternalUser(ScalarArg, cast<User>(V), FoundLane));
9718 }
9719 }
9720
9721 propagateIRFlags(V, E->Scalars, VL0);
9722 V = FinalShuffle(V, E);
9723
9724 E->VectorizedValue = V;
9725 ++NumVectorInstructions;
9726 return V;
9727 }
9728 case Instruction::ShuffleVector: {
9729     assert(E->isAltShuffle() &&
9730            ((Instruction::isBinaryOp(E->getOpcode()) &&
9731              Instruction::isBinaryOp(E->getAltOpcode())) ||
9732             (Instruction::isCast(E->getOpcode()) &&
9733              Instruction::isCast(E->getAltOpcode())) ||
9734             (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
9735            "Invalid Shuffle Vector Operand");
9736
9737 Value *LHS = nullptr, *RHS = nullptr;
9738 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) {
9739 setInsertPointAfterBundle(E);
9740 LHS = vectorizeOperand(E, 0);
9741 if (E->VectorizedValue) {
9742 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9743 return E->VectorizedValue;
9744 }
9745 RHS = vectorizeOperand(E, 1);
9746 } else {
9747 setInsertPointAfterBundle(E);
9748 LHS = vectorizeOperand(E, 0);
9749 }
9750 if (E->VectorizedValue) {
9751 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
9752 return E->VectorizedValue;
9753 }
9754
9755 Value *V0, *V1;
9756 if (Instruction::isBinaryOp(E->getOpcode())) {
9757 V0 = Builder.CreateBinOp(
9758 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
9759 V1 = Builder.CreateBinOp(
9760 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
9761 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
9762 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS);
9763 auto *AltCI = cast<CmpInst>(E->getAltOp());
9764 CmpInst::Predicate AltPred = AltCI->getPredicate();
9765 V1 = Builder.CreateCmp(AltPred, LHS, RHS);
9766 } else {
9767 V0 = Builder.CreateCast(
9768 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
9769 V1 = Builder.CreateCast(
9770 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
9771 }
9772 // Add V0 and V1 to later analysis to try to find and remove matching
9773 // instruction, if any.
9774 for (Value *V : {V0, V1}) {
9775 if (auto *I = dyn_cast<Instruction>(V)) {
9776 GatherShuffleExtractSeq.insert(I);
9777 CSEBlocks.insert(I->getParent());
9778 }
9779 }
9780
9781 // Create shuffle to take alternate operations from the vector.
9782 // Also, gather up main and alt scalar ops to propagate IR flags to
9783 // each vector operation.
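// As an illustrative sketch (made-up scalars, not from a test case):
// vectorizing the alternating sequence {a0+b0, a1-b1, a2+b2, a3-b3} below
// produces
//   %V0 = add <4 x i32> %A, %B
//   %V1 = sub <4 x i32> %A, %B
//   %V = shufflevector <4 x i32> %V0, <4 x i32> %V1,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// where main-opcode lanes select from V0 and alternate-opcode lanes select
// from V1 (their indices offset by the vector width).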
9784 ValueList OpScalars, AltScalars;
9785 SmallVector<int> Mask;
9786 buildShuffleEntryMask(
9787 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
9788 [E, this](Instruction *I) {
9789 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
9790 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(),
9791 *TLI);
9792 },
9793 Mask, &OpScalars, &AltScalars);
9794
9795 propagateIRFlags(V0, OpScalars);
9796 propagateIRFlags(V1, AltScalars);
9797
9798 Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
9799 if (auto *I = dyn_cast<Instruction>(V)) {
9800 V = propagateMetadata(I, E->Scalars);
9801 GatherShuffleExtractSeq.insert(I);
9802 CSEBlocks.insert(I->getParent());
9803 }
9804
9805 E->VectorizedValue = V;
9806 ++NumVectorInstructions;
9807
9808 return V;
9809 }
9810 default:
9811 llvm_unreachable("unknown inst");
9812 }
9813 return nullptr;
9814}
9815
9816Value *BoUpSLP::vectorizeTree() {
9817 ExtraValueToDebugLocsMap ExternallyUsedValues;
9818 return vectorizeTree(ExternallyUsedValues);
9819}
9820
9821namespace {
9822/// Data type for handling buildvector sequences with the reused scalars from
9823/// other tree entries.
9824struct ShuffledInsertData {
9825 /// List of insertelements to be replaced by shuffles.
9826 SmallVector<InsertElementInst *> InsertElements;
9827 /// The parent vectors and shuffle mask for the given list of inserts.
9828 MapVector<Value *, SmallVector<int>> ValueMasks;
9829};
9830} // namespace
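// For intuition, a buildvector sequence here is a chain such as (a sketch
// with made-up names):
//   %v0 = insertelement <4 x float> poison, float %a, i32 0
//   %v1 = insertelement <4 x float> %v0, float %b, i32 1
//   %v2 = insertelement <4 x float> %v1, float %c, i32 2
// InsertElements collects the chain members; ValueMasks maps each source
// vector to the mask of lanes taken from it once the chain is rewritten as
// shuffles.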
9831
9832Value *BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues,
9833 Instruction *ReductionRoot) {
9834 // All blocks must be scheduled before any instructions are inserted.
9835 for (auto &BSIter : BlocksSchedules) {
9836 scheduleBlock(BSIter.second.get());
9837 }
9838
9839 // Pre-gather last instructions.
9840 for (const std::unique_ptr<TreeEntry> &E : VectorizableTree) {
9841 if ((E->State == TreeEntry::NeedToGather &&
9842 (!E->getMainOp() || E->Idx > 0)) ||
9843 (E->State != TreeEntry::NeedToGather &&
9844 E->getOpcode() == Instruction::ExtractValue) ||
9845 E->getOpcode() == Instruction::InsertElement)
9846 continue;
9847 Instruction *LastInst = &getLastInstructionInBundle(E.get());
9848 EntryToLastInstruction.try_emplace(E.get(), LastInst);
9849 }
9850
9851 Builder.SetInsertPoint(ReductionRoot ? ReductionRoot
9852 : &F->getEntryBlock().front());
9853 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
9854
9855 // If the vectorized tree can be rewritten in a smaller type, we truncate the
9856 // vectorized root. InstCombine will then rewrite the entire expression. We
9857 // sign extend the extracted values below.
9858 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
9859 if (MinBWs.count(ScalarRoot)) {
9860 if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
9861 // If current instr is a phi and not the last phi, insert it after the
9862 // last phi node.
9863 if (isa<PHINode>(I))
9864 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
9865 else
9866 Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
9867 }
9868 auto BundleWidth = VectorizableTree[0]->Scalars.size();
9869 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
9870 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
9871 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
9872 VectorizableTree[0]->VectorizedValue = Trunc;
9873 }
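// Sketch of the effect (the types are illustrative): if the tree was built
// in i32 but MinBWs proves only 8 bits are demanded, the root is narrowed:
//   %root = mul <4 x i32> %x, %y
//   %tr = trunc <4 x i32> %root to <4 x i8>
// The extracts emitted below then sign- or zero-extend the scalars back to
// their original type, and InstCombine shrinks the remaining expression.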
9874
9875 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
9876 << " values .\n");
9877
9878 SmallVector<ShuffledInsertData> ShuffledInserts;
9879 // Maps vector instruction to original insertelement instruction
9880 DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
9881 // Maps extract Scalar to the corresponding extractelement instruction in the
9882 // basic block. Only one extractelement per block should be emitted.
9883 DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs;
9884 // Extract all of the elements with the external uses.
9885 for (const auto &ExternalUse : ExternalUses) {
9886 Value *Scalar = ExternalUse.Scalar;
9887 llvm::User *User = ExternalUse.User;
9888
9889 // Skip users that we have already RAUWed. This happens when one instruction
9890 // has multiple uses of the same value.
9891 if (User && !is_contained(Scalar->users(), User))
9892 continue;
9893 TreeEntry *E = getTreeEntry(Scalar);
9894 assert(E && "Invalid scalar");
9895 assert(E->State != TreeEntry::NeedToGather &&
9896 "Extracting from a gather list");
9897 // Non-instruction pointers are not deleted, just skip them.
9898 if (E->getOpcode() == Instruction::GetElementPtr &&
9899 !isa<GetElementPtrInst>(Scalar))
9900 continue;
9901
9902 Value *Vec = E->VectorizedValue;
9903 assert(Vec && "Can't find vectorizable value");
9904
9905 Value *Lane = Builder.getInt32(ExternalUse.Lane);
9906 auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
9907 if (Scalar->getType() != Vec->getType()) {
9908 Value *Ex = nullptr;
9909 auto It = ScalarToEEs.find(Scalar);
9910 if (It != ScalarToEEs.end()) {
9911 // No need to emit many extracts, just move the only one in the
9912 // current block.
9913 auto EEIt = It->second.find(Builder.GetInsertBlock());
9914 if (EEIt != It->second.end()) {
9915 Instruction *I = EEIt->second;
9916 if (Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() &&
9917 Builder.GetInsertPoint()->comesBefore(I))
9918 I->moveBefore(&*Builder.GetInsertPoint());
9919 Ex = I;
9920 }
9921 }
9922 if (!Ex) {
9923 // "Reuse" the existing extract to improve final codegen.
9924 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
9925 Ex = Builder.CreateExtractElement(ES->getOperand(0),
9926 ES->getOperand(1));
9927 } else {
9928 Ex = Builder.CreateExtractElement(Vec, Lane);
9929 }
9930 if (auto *I = dyn_cast<Instruction>(Ex))
9931 ScalarToEEs[Scalar].try_emplace(Builder.GetInsertBlock(), I);
9932 }
9933 // The then branch of the previous if may produce constants, since
9934 // operand 0 might be a constant.
9935 if (auto *ExI = dyn_cast<Instruction>(Ex)) {
9936 GatherShuffleExtractSeq.insert(ExI);
9937 CSEBlocks.insert(ExI->getParent());
9938 }
9939 // If necessary, sign-extend or zero-extend ScalarRoot
9940 // to the larger type.
9941 if (!MinBWs.count(ScalarRoot))
9942 return Ex;
9943 if (MinBWs[ScalarRoot].second)
9944 return Builder.CreateSExt(Ex, Scalar->getType());
9945 return Builder.CreateZExt(Ex, Scalar->getType());
9946 }
9947 assert(isa<FixedVectorType>(Scalar->getType()) &&
9948 isa<InsertElementInst>(Scalar) &&
9949 "In-tree scalar of vector type is not insertelement?");
9950 auto *IE = cast<InsertElementInst>(Scalar);
9951 VectorToInsertElement.try_emplace(Vec, IE);
9952 return Vec;
9953 };
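// In the common case the lambda above reduces to (illustrative IR):
//   %ex = extractelement <4 x i8> %Vec, i32 Lane
//   %sx = sext i8 %ex to i32 ; only when MinBWs narrowed the root
// with the extractelement reused across calls in the same basic block via
// the ScalarToEEs cache.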
9954 // If User == nullptr, the Scalar is used as an extra arg. Generate an
9955 // ExtractElement instruction and update the record for this scalar in
9956 // ExternallyUsedValues.
9957 if (!User) {
9958 assert(ExternallyUsedValues.count(Scalar) &&
9959 "Scalar with nullptr as an external user must be registered in "
9960 "ExternallyUsedValues map");
9961 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
9962 if (auto *PHI = dyn_cast<PHINode>(VecI))
9963 Builder.SetInsertPoint(PHI->getParent()->getFirstNonPHI());
9964 else
9965 Builder.SetInsertPoint(VecI->getParent(),
9966 std::next(VecI->getIterator()));
9967 } else {
9968 Builder.SetInsertPoint(&F->getEntryBlock().front());
9969 }
9970 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
9971 auto &NewInstLocs = ExternallyUsedValues[NewInst];
9972 auto It = ExternallyUsedValues.find(Scalar);
9973 assert(It != ExternallyUsedValues.end() &&
9974 "Externally used scalar is not found in ExternallyUsedValues");
9975 NewInstLocs.append(It->second);
9976 ExternallyUsedValues.erase(Scalar);
9977 // Required to update internally referenced instructions.
9978 Scalar->replaceAllUsesWith(NewInst);
9979 continue;
9980 }
9981
9982 if (auto *VU = dyn_cast<InsertElementInst>(User)) {
9983 // Skip if the scalar is another vector op or Vec is not an instruction.
9984 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) {
9985 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) {
9986 std::optional<unsigned> InsertIdx = getInsertIndex(VU);
9987 if (InsertIdx) {
9988 // Need to use original vector, if the root is truncated.
9989 if (MinBWs.count(Scalar) &&
9990 VectorizableTree[0]->VectorizedValue == Vec)
9991 Vec = VectorRoot;
9992 auto *It =
9993 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) {
9994 // Checks if 2 insertelements are from the same buildvector.
9995 InsertElementInst *VecInsert = Data.InsertElements.front();
9996 return areTwoInsertFromSameBuildVector(
9997 VU, VecInsert,
9998 [](InsertElementInst *II) { return II->getOperand(0); });
9999 });
10000 unsigned Idx = *InsertIdx;
10001 if (It == ShuffledInserts.end()) {
10002 (void)ShuffledInserts.emplace_back();
10003 It = std::next(ShuffledInserts.begin(),
10004 ShuffledInserts.size() - 1);
10005 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
10006 if (Mask.empty())
10007 Mask.assign(FTy->getNumElements(), UndefMaskElem);
10008 // Find the insert vector that was vectorized in the tree, if any.
10009 Value *Base = VU;
10010 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
10011 if (IEBase != User &&
10012 (!IEBase->hasOneUse() ||
10013 getInsertIndex(IEBase).value_or(Idx) == Idx))
10014 break;
10015 // Build the mask for the vectorized insertelement instructions.
10016 if (const TreeEntry *E = getTreeEntry(IEBase)) {
10017 do {
10018 IEBase = cast<InsertElementInst>(Base);
10019 int IEIdx = *getInsertIndex(IEBase);
10020 assert(Mask[Idx] == UndefMaskElem &&
10021 "InsertElementInstruction used already.");
10022 Mask[IEIdx] = IEIdx;
10023 Base = IEBase->getOperand(0);
10024 } while (E == getTreeEntry(Base));
10025 break;
10026 }
10027 Base = cast<InsertElementInst>(Base)->getOperand(0);
10028 // After the vectorization the def-use chain has changed, so we need
10029 // to look through the original insertelement instructions if they
10030 // got replaced by vector instructions.
10031 auto It = VectorToInsertElement.find(Base);
10032 if (It != VectorToInsertElement.end())
10033 Base = It->second;
10034 }
10035 }
10036 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
10037 if (Mask.empty())
10038 Mask.assign(FTy->getNumElements(), UndefMaskElem);
10039 Mask[Idx] = ExternalUse.Lane;
10040 It->InsertElements.push_back(cast<InsertElementInst>(User));
10041 continue;
10042 }
10043 }
10044 }
10045 }
10046
10047 // Generate extracts for out-of-tree users.
10048 // Find the insertion point for the extractelement lane.
10049 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
10050 if (PHINode *PH = dyn_cast<PHINode>(User)) {
10051 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
10052 if (PH->getIncomingValue(i) == Scalar) {
10053 Instruction *IncomingTerminator =
10054 PH->getIncomingBlock(i)->getTerminator();
10055 if (isa<CatchSwitchInst>(IncomingTerminator)) {
10056 Builder.SetInsertPoint(VecI->getParent(),
10057 std::next(VecI->getIterator()));
10058 } else {
10059 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
10060 }
10061 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
10062 PH->setOperand(i, NewInst);
10063 }
10064 }
10065 } else {
10066 Builder.SetInsertPoint(cast<Instruction>(User));
10067 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
10068 User->replaceUsesOfWith(Scalar, NewInst);
10069 }
10070 } else {
10071 Builder.SetInsertPoint(&F->getEntryBlock().front());
10072 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
10073 User->replaceUsesOfWith(Scalar, NewInst);
10074 }
10075
10076 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
10077 }
10078
10079 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) {
10080 SmallVector<int> CombinedMask1(Mask.size(), UndefMaskElem);
10081 SmallVector<int> CombinedMask2(Mask.size(), UndefMaskElem);
10082 int VF = cast<FixedVectorType>(V1->getType())->getNumElements();
10083 for (int I = 0, E = Mask.size(); I < E; ++I) {
10084 if (Mask[I] < VF)
10085 CombinedMask1[I] = Mask[I];
10086 else
10087 CombinedMask2[I] = Mask[I] - VF;
10088 }
10089 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
10090 ShuffleBuilder.add(V1, CombinedMask1);
10091 if (V2)
10092 ShuffleBuilder.add(V2, CombinedMask2);
10093 return ShuffleBuilder.finalize(std::nullopt);
10094 };
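// E.g. (illustrative values): with VF == 4 and Mask == <0, 5, 2, 7>, the
// loop above yields CombinedMask1 == <0, -, 2, -> for V1 and
// CombinedMask2 == <-, 1, -, 3> for V2 ('-' standing for UndefMaskElem);
// the ShuffleInstructionBuilder then folds both back into a single
// two-source shuffle.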
10095
10096 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask,
10097 bool ForSingleMask) {
10098 unsigned VF = Mask.size();
10099 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
10100 if (VF != VecVF) {
10101 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) {
10102 Vec = CreateShuffle(Vec, nullptr, Mask);
10103 return std::make_pair(Vec, true);
10104 }
10105 if (!ForSingleMask) {
10106 SmallVector<int> ResizeMask(VF, UndefMaskElem);
10107 for (unsigned I = 0; I < VF; ++I) {
10108 if (Mask[I] != UndefMaskElem)
10109 ResizeMask[Mask[I]] = Mask[I];
10110 }
10111 Vec = CreateShuffle(Vec, nullptr, ResizeMask);
10112 }
10113 }
10114
10115 return std::make_pair(Vec, false);
10116 };
10117 // Perform shuffling of the vectorized tree entries for better handling of
10118 // external extracts.
10119 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
10120 // Find the first and the last instruction in the list of insertelements.
10121 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement);
10122 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front();
10123 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back();
10124 Builder.SetInsertPoint(LastInsert);
10125 auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
10126 Value *NewInst = performExtractsShuffleAction<Value>(
10127 MutableArrayRef(Vector.data(), Vector.size()),
10128 FirstInsert->getOperand(0),
10129 [](Value *Vec) {
10130 return cast<VectorType>(Vec->getType())
10131 ->getElementCount()
10132 .getKnownMinValue();
10133 },
10134 ResizeToVF,
10135 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask,
10136 ArrayRef<Value *> Vals) {
10137 assert((Vals.size() == 1 || Vals.size() == 2) &&
10138 "Expected exactly 1 or 2 input values.");
10139 if (Vals.size() == 1) {
10140 // Do not create shuffle if the mask is a simple identity
10141 // non-resizing mask.
10142 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType())
10143 ->getNumElements() ||
10144 !ShuffleVectorInst::isIdentityMask(Mask))
10145 return CreateShuffle(Vals.front(), nullptr, Mask);
10146 return Vals.front();
10147 }
10148 return CreateShuffle(Vals.front() ? Vals.front()
10149 : FirstInsert->getOperand(0),
10150 Vals.back(), Mask);
10151 });
10152 auto It = ShuffledInserts[I].InsertElements.rbegin();
10153 // Rebuild buildvector chain.
10154 InsertElementInst *II = nullptr;
10155 if (It != ShuffledInserts[I].InsertElements.rend())
10156 II = *It;
10157 SmallVector<Instruction *> Inserts;
10158 while (It != ShuffledInserts[I].InsertElements.rend()) {
10159 assert(II && "Must be an insertelement instruction.");
10160 if (*It == II)
10161 ++It;
10162 else
10163 Inserts.push_back(cast<Instruction>(II));
10164 II = dyn_cast<InsertElementInst>(II->getOperand(0));
10165 }
10166 for (Instruction *II : reverse(Inserts)) {
10167 II->replaceUsesOfWith(II->getOperand(0), NewInst);
10168 if (auto *NewI = dyn_cast<Instruction>(NewInst))
10169 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI))
10170 II->moveAfter(NewI);
10171 NewInst = II;
10172 }
10173 LastInsert->replaceAllUsesWith(NewInst);
10174 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) {
10175 IE->replaceUsesOfWith(IE->getOperand(0),
10176 PoisonValue::get(IE->getOperand(0)->getType()));
10177 IE->replaceUsesOfWith(IE->getOperand(1),
10178 PoisonValue::get(IE->getOperand(1)->getType()));
10179 eraseInstruction(IE);
10180 }
10181 CSEBlocks.insert(LastInsert->getParent());
10182 }
10183
10184 SmallVector<Instruction *> RemovedInsts;
10185 // For each vectorized value:
10186 for (auto &TEPtr : VectorizableTree) {
10187 TreeEntry *Entry = TEPtr.get();
10188
10189 // No need to handle users of gathered values.
10190 if (Entry->State == TreeEntry::NeedToGather)
10191 continue;
10192
10193 assert(Entry->VectorizedValue && "Can't find vectorizable value");
10194
10195 // For each lane:
10196 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
10197 Value *Scalar = Entry->Scalars[Lane];
10198
10199 if (Entry->getOpcode() == Instruction::GetElementPtr &&
10200 !isa<GetElementPtrInst>(Scalar))
10201 continue;
10202#ifndef NDEBUG
10203 Type *Ty = Scalar->getType();
10204 if (!Ty->isVoidTy()) {
10205 for (User *U : Scalar->users()) {
10206 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
10208 // It is legal to delete users in the ignorelist.
10209 assert((getTreeEntry(U) ||
10210 (UserIgnoreList && UserIgnoreList->contains(U)) ||
10211 (isa_and_nonnull<Instruction>(U) &&
10212 isDeleted(cast<Instruction>(U)))) &&
10213 "Deleting out-of-tree value");
10214 }
10215 }
10216#endif
10217 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
10218 eraseInstruction(cast<Instruction>(Scalar));
10219 // Retain to-be-deleted instructions for some debug-info
10220 // bookkeeping. NOTE: eraseInstruction only marks the instruction for
10221 // deletion - instructions are not deleted until later.
10222 RemovedInsts.push_back(cast<Instruction>(Scalar));
10223 }
10224 }
10225
10226 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the
10227 // new vector instruction.
10228 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue))
10229 V->mergeDIAssignID(RemovedInsts);
10230
10231 Builder.ClearInsertionPoint();
10232 InstrElementSize.clear();
10233
10234 return VectorizableTree[0]->VectorizedValue;
10235}
10236
10237void BoUpSLP::optimizeGatherSequence() {
10238 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size()
10239 << " gather sequences instructions.\n");
10240 // LICM InsertElementInst sequences.
10241 for (Instruction *I : GatherShuffleExtractSeq) {
10242 if (isDeleted(I))
10243 continue;
10244
10245 // Check if this block is inside a loop.
10246 Loop *L = LI->getLoopFor(I->getParent());
10247 if (!L)
10248 continue;
10249
10250 // Check if it has a preheader.
10251 BasicBlock *PreHeader = L->getLoopPreheader();
10252 if (!PreHeader)
10253 continue;
10254
10255 // If the vector or the element that we insert into it is an
10256 // instruction defined inside the loop, then we can't hoist this
10257 // instruction.
10258 if (any_of(I->operands(), [L](Value *V) {
10259 auto *OpI = dyn_cast<Instruction>(V);
10260 return OpI && L->contains(OpI);
10261 }))
10262 continue;
10263
10264 // We can hoist this instruction. Move it to the pre-header.
10265 I->moveBefore(PreHeader->getTerminator());
10266 CSEBlocks.insert(PreHeader);
10267 }
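// Sketch of the hoisting (made-up IR): a gather instruction whose operands
// are all defined outside the loop moves from the body to the preheader:
//   preheader:
//     %g = insertelement <2 x float> poison, float %inv, i32 0
//     br label %loop
//   loop:
//     ... uses of %g ...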
10268
10269 // Make a list of all reachable blocks in our CSE queue.
10270 SmallVector<const DomTreeNode *, 8> CSEWorkList;
10271 CSEWorkList.reserve(CSEBlocks.size());
10272 for (BasicBlock *BB : CSEBlocks)
10273 if (DomTreeNode *N = DT->getNode(BB)) {
10274 assert(DT->isReachableFromEntry(N));
10275 CSEWorkList.push_back(N);
10276 }
10277
10278 // Sort blocks by domination. This ensures we visit a block after all blocks
10279 // dominating it are visited.
10280 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
10281 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
10282 "Different nodes should have different DFS numbers");
10283 return A->getDFSNumIn() < B->getDFSNumIn();
10284 });
10285
10286 // Less defined shuffles can be replaced by the more defined copies.
10287 // Between two shuffles, one is less defined if it has the same vector
10288 // operands and its mask indices are the same as in the other one or are
10289 // undefs. E.g. shuffle %0, poison, <0, 0, 0, undef> is less defined than
10290 // shuffle %0, poison, <0, 0, 0, 0>.
10291 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
10292 SmallVectorImpl<int> &NewMask) {
10293 if (I1->getType() != I2->getType())
10294 return false;
10295 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
10296 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
10297 if (!SI1 || !SI2)
10298 return I1->isIdenticalTo(I2);
10299 if (SI1->isIdenticalTo(SI2))
10300 return true;
10301 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
10302 if (SI1->getOperand(I) != SI2->getOperand(I))
10303 return false;
10304 // Check if the second instruction is more defined than the first one.
10305 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
10306 ArrayRef<int> SM1 = SI1->getShuffleMask();
10307 // Count trailing undefs in the mask to check the final number of used
10308 // registers.
10309 unsigned LastUndefsCnt = 0;
10310 for (int I = 0, E = NewMask.size(); I < E; ++I) {
10311 if (SM1[I] == UndefMaskElem)
10312 ++LastUndefsCnt;
10313 else
10314 LastUndefsCnt = 0;
10315 if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem &&
10316 NewMask[I] != SM1[I])
10317 return false;
10318 if (NewMask[I] == UndefMaskElem)
10319 NewMask[I] = SM1[I];
10320 }
10321 // Check if the last undefs actually change the final number of used vector
10322 // registers.
10323 return SM1.size() - LastUndefsCnt > 1 &&
10324 TTI->getNumberOfParts(SI1->getType()) ==
10325 TTI->getNumberOfParts(
10326 FixedVectorType::get(SI1->getType()->getElementType(),
10327 SM1.size() - LastUndefsCnt));
10328 };
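// E.g. (illustrative): for SM1 == <0, 1, undef, undef>, LastUndefsCnt == 2;
// the replacement is allowed only if ignoring those trailing undef lanes
// would not shrink the number of vector registers actually used, which the
// getNumberOfParts comparison above verifies.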
10329 // Perform O(N^2) search over the gather/shuffle sequences and merge identical
10330 // instructions. TODO: We can further optimize this scan if we split the
10331 // instructions into different buckets based on the insert lane.
10332 SmallVector<Instruction *, 16> Visited;
10333 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
10334 assert(*I &&
10335 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
10336 "Worklist not sorted properly!");
10337 BasicBlock *BB = (*I)->getBlock();
10338 // For all instructions in blocks containing gather sequences:
10339 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
10340 if (isDeleted(&In))
10341 continue;
10342 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) &&
10343 !GatherShuffleExtractSeq.contains(&In))
10344 continue;
10345
10346 // Check if we can replace this instruction with any of the
10347 // visited instructions.
10348 bool Replaced = false;
10349 for (Instruction *&V : Visited) {
10350 SmallVector<int> NewMask;
10351 if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
10352 DT->dominates(V->getParent(), In.getParent())) {
10353 In.replaceAllUsesWith(V);
10354 eraseInstruction(&In);
10355 if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
10356 if (!NewMask.empty())
10357 SI->setShuffleMask(NewMask);
10358 Replaced = true;
10359 break;
10360 }
10361 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
10362 GatherShuffleExtractSeq.contains(V) &&
10363 IsIdenticalOrLessDefined(V, &In, NewMask) &&
10364 DT->dominates(In.getParent(), V->getParent())) {
10365 In.moveAfter(V);
10366 V->replaceAllUsesWith(&In);
10367 eraseInstruction(V);
10368 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
10369 if (!NewMask.empty())
10370 SI->setShuffleMask(NewMask);
10371 V = &In;
10372 Replaced = true;
10373 break;
10374 }
10375 }
10376 if (!Replaced) {
10377 assert(!is_contained(Visited, &In));
10378 Visited.push_back(&In);
10379 }
10380 }
10381 }
10382 CSEBlocks.clear();
10383 GatherShuffleExtractSeq.clear();
10384}
10385
10386BoUpSLP::ScheduleData *
10387BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
10388 ScheduleData *Bundle = nullptr;
10389 ScheduleData *PrevInBundle = nullptr;
10390 for (Value *V : VL) {
10391 if (doesNotNeedToBeScheduled(V))
10392 continue;
10393 ScheduleData *BundleMember = getScheduleData(V);
10394 assert(BundleMember &&
10395 "no ScheduleData for bundle member "
10396 "(maybe not in same basic block)");
10397 assert(BundleMember->isSchedulingEntity() &&
10398 "bundle member already part of other bundle");
10399 if (PrevInBundle) {
10400 PrevInBundle->NextInBundle = BundleMember;
10401 } else {
10402 Bundle = BundleMember;
10403 }
10404
10405 // Group the instructions to a bundle.
10406 BundleMember->FirstInBundle = Bundle;
10407 PrevInBundle = BundleMember;
10408 }
10409 assert(Bundle && "Failed to find schedule bundle");
10410 return Bundle;
10411}
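// Resulting intrusive links for VL = {I0, I1, I2} (a sketch):
//   SD(I0): FirstInBundle = SD(I0), NextInBundle = SD(I1)
//   SD(I1): FirstInBundle = SD(I0), NextInBundle = SD(I2)
//   SD(I2): FirstInBundle = SD(I0), NextInBundle = null
// so the whole bundle is addressed through its first member and walked via
// NextInBundle.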
10412
10413 // Groups the instructions into a bundle (which is then a single scheduling
10414 // entity) and schedules instructions until the bundle is ready.
10415std::optional<BoUpSLP::ScheduleData *>
10416BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
10417 const InstructionsState &S) {
10418 // No need to schedule PHIs, insertelement, extractelement and extractvalue
10419 // instructions.
10420 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) ||
10421 doesNotNeedToSchedule(VL))
10422 return nullptr;
10423
10424 // Initialize the instruction bundle.
10425 Instruction *OldScheduleEnd = ScheduleEnd;
10426 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
10427
10428 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
10429 ScheduleData *Bundle) {
10430 // The scheduling region got new instructions at the lower end (or it is a
10431 // new region for the first bundle). This makes it necessary to
10432 // recalculate all dependencies.
10433 // It is seldom that this needs to be done a second time after adding the
10434 // initial bundle to the region.
10435 if (ScheduleEnd != OldScheduleEnd) {
10436 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
10437 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
10438 ReSchedule = true;
10439 }
10440 if (Bundle) {
10441 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
10442 << " in block " << BB->getName() << "\n");
10443 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
10444 }
10445
10446 if (ReSchedule) {
10447 resetSchedule();
10448 initialFillReadyList(ReadyInsts);
10449 }
10450
10451 // Now try to schedule the new bundle or (if no bundle) just calculate
10452 // dependencies. As soon as the bundle is "ready" it means that there are no
10453 // cyclic dependencies and we can schedule it. Note that it's important that
10454 // we don't "schedule" the bundle yet (see cancelScheduling).
10455 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
10456 !ReadyInsts.empty()) {
10457 ScheduleData *Picked = ReadyInsts.pop_back_val();
10458 assert(Picked->isSchedulingEntity() && Picked->isReady() &&
10459 "must be ready to schedule");
10460 schedule(Picked, ReadyInsts);
10461 }
10462 };
10463
10464 // Make sure that the scheduling region contains all
10465 // instructions of the bundle.
10466 for (Value *V : VL) {
10467 if (doesNotNeedToBeScheduled(V))
10468 continue;
10469 if (!extendSchedulingRegion(V, S)) {
10470 // If the scheduling region got new instructions at the lower end (or it
10471 // is a new region for the first bundle), it is necessary to recalculate
10472 // all dependencies.
10473 // Otherwise the compiler may crash, or incorrectly calculate dependencies
10474 // and emit instructions in the wrong order during the actual scheduling.
10476 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
10477 return std::nullopt;
10478 }
10479 }
10480
10481 bool ReSchedule = false;
10482 for (Value *V : VL) {
10483 if (doesNotNeedToBeScheduled(V))
10484 continue;
10485 ScheduleData *BundleMember = getScheduleData(V);
10486 assert(BundleMember &&
10487 "no ScheduleData for bundle member (maybe not in same basic block)");
10488
10489 // Make sure we don't leave the pieces of the bundle in the ready list when
10490 // the whole bundle might not be ready.
10491 ReadyInsts.remove(BundleMember);
10492
10493 if (!BundleMember->IsScheduled)
10494 continue;
10495 // A bundle member was scheduled as a single instruction before and now
10496 // needs to be scheduled as part of the bundle. We just get rid of the
10497 // existing schedule.
10498 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
10499 << " was already scheduled\n");
10500 ReSchedule = true;
10501 }
10502
10503 auto *Bundle = buildBundle(VL);
10504 TryScheduleBundleImpl(ReSchedule, Bundle);
10505 if (!Bundle->isReady()) {
10506 cancelScheduling(VL, S.OpValue);
10507 return std::nullopt;
10508 }
10509 return Bundle;
10510}
10511
10512void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
10513 Value *OpValue) {
10514 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) ||
10515 doesNotNeedToSchedule(VL))
10516 return;
10517
10518 if (doesNotNeedToBeScheduled(OpValue))
10519 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled);
10520 ScheduleData *Bundle = getScheduleData(OpValue);
10521 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
10522 assert(!Bundle->IsScheduled &&
10523 "Can't cancel bundle which is already scheduled");
10524 assert(Bundle->isSchedulingEntity() &&
10525 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) &&
10526 "tried to unbundle something which is not a bundle");
10527
10528 // Remove the bundle from the ready list.
10529 if (Bundle->isReady())
10530 ReadyInsts.remove(Bundle);
10531
10532 // Un-bundle: make single instructions out of the bundle.
10533 ScheduleData *BundleMember = Bundle;
10534 while (BundleMember) {
10535 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
10536 BundleMember->FirstInBundle = BundleMember;
10537 ScheduleData *Next = BundleMember->NextInBundle;
10538 BundleMember->NextInBundle = nullptr;
10539 BundleMember->TE = nullptr;
10540 if (BundleMember->unscheduledDepsInBundle() == 0) {
10541 ReadyInsts.insert(BundleMember);
10542 }
10543 BundleMember = Next;
10544 }
10545}
10546
10547BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
10548 // Allocate a new ScheduleData for the instruction.
10549 if (ChunkPos >= ChunkSize) {
10550 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
10551 ChunkPos = 0;
10552 }
10553 return &(ScheduleDataChunks.back()[ChunkPos++]);
10554}
10555
10556bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
10557 const InstructionsState &S) {
10558 if (getScheduleData(V, isOneOf(S, V)))
10559 return true;
10560 Instruction *I = dyn_cast<Instruction>(V);
10561 assert(I && "bundle member must be an instruction");
10562 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
10563 !doesNotNeedToBeScheduled(I) &&
10564 "phi nodes/insertelements/extractelements/extractvalues don't need to "
10565 "be scheduled");
10566 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
10567 ScheduleData *ISD = getScheduleData(I);
10568 if (!ISD)
10569 return false;
10570 assert(isInSchedulingRegion(ISD) &&
10571 "ScheduleData not in scheduling region");
10572 ScheduleData *SD = allocateScheduleDataChunks();
10573 SD->Inst = I;
10574 SD->init(SchedulingRegionID, S.OpValue);
10575 ExtraScheduleDataMap[I][S.OpValue] = SD;
10576 return true;
10577 };
10578 if (CheckScheduleForI(I))
10579 return true;
10580 if (!ScheduleStart) {
10581 // It's the first instruction in the new region.
10582 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
10583 ScheduleStart = I;
10584 ScheduleEnd = I->getNextNode();
10585 if (isOneOf(S, I) != I)
10586 CheckScheduleForI(I);
10587 assert(ScheduleEnd && "tried to vectorize a terminator?");
10588 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
10589 return true;
10590 }
10591 // Search up and down at the same time, because we don't know if the new
10592 // instruction is above or below the existing scheduling region.
10593 BasicBlock::reverse_iterator UpIter =
10594 ++ScheduleStart->getIterator().getReverse();
10595 BasicBlock::reverse_iterator UpperEnd = BB->rend();
10596 BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
10597 BasicBlock::iterator LowerEnd = BB->end();
10598 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
10599 &*DownIter != I) {
10600 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
10601 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
10602 return false;
10603 }
10604
10605 ++UpIter;
10606 ++DownIter;
10607 }
10608 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
10609 assert(I->getParent() == ScheduleStart->getParent() &&
10610 "Instruction is in wrong basic block.");
10611 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
10612 ScheduleStart = I;
10613 if (isOneOf(S, I) != I)
10614 CheckScheduleForI(I);
10615 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
10616 << "\n");
10617 return true;
10618 }
10619 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
10620 "Expected to reach top of the basic block or instruction down the "
10621 "lower end.");
10622 assert(I->getParent() == ScheduleEnd->getParent() &&
10623 "Instruction is in wrong basic block.");
10624 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
10625 nullptr);
10626 ScheduleEnd = I->getNextNode();
10627 if (isOneOf(S, I) != I)
10628 CheckScheduleForI(I);
10629 assert(ScheduleEnd && "tried to vectorize a terminator?");
10630 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
10631 return true;
10632}
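
// Illustrative sketch (simplified, hypothetical code; not part of
// SLPVectorizer.cpp): the bidirectional walk above costs time proportional to
// the distance between the window and the new instruction, capped by a size
// limit, rather than to the whole block length. The same idea on a plain array:

#include <cstddef>
#include <vector>

// Returns -1 if Target lies above the window [Begin, End), +1 if below, and
// 0 if the walk hits SizeLimit first (like ScheduleRegionSizeLimit above).
inline int locateOutsideWindow(const std::vector<int> &Block, size_t Begin,
                               size_t End, int Target, size_t SizeLimit) {
  size_t Up = Begin, Down = End, Steps = 0;
  while (Up > 0 || Down < Block.size()) {
    if (++Steps > SizeLimit)
      return 0; // region would grow too large; give up
    if (Up > 0 && Block[--Up] == Target)
      return -1; // extend the window start upward
    if (Down < Block.size() && Block[Down++] == Target)
      return +1; // extend the window end downward
  }
  return 0; // not found in this "block"
}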
10633
10634void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
10635 Instruction *ToI,
10636 ScheduleData *PrevLoadStore,
10637 ScheduleData *NextLoadStore) {
10638 ScheduleData *CurrentLoadStore = PrevLoadStore;
10639 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
10640 // No need to allocate data for non-schedulable instructions.
10641 if (doesNotNeedToBeScheduled(I))
10642 continue;
10643 ScheduleData *SD = ScheduleDataMap.lookup(I);
10644 if (!SD) {
10645 SD = allocateScheduleDataChunks();
10646 ScheduleDataMap[I] = SD;
10647 SD->Inst = I;
10648 }
10649 assert(!isInSchedulingRegion(SD) &&
10650 "new ScheduleData already in scheduling region");
10651 SD->init(SchedulingRegionID, I);
10652
10653 if (I->mayReadOrWriteMemory() &&
10654 (!isa<IntrinsicInst>(I) ||
10655 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
10656 cast<IntrinsicInst>(I)->getIntrinsicID() !=
10657 Intrinsic::pseudoprobe))) {
10658 // Update the linked list of memory accessing instructions.
10659 if (CurrentLoadStore) {
10660 CurrentLoadStore->NextLoadStore = SD;
10661 } else {
10662 FirstLoadStoreInRegion = SD;
10663 }
10664 CurrentLoadStore = SD;
10665 }
10666
10667 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
10668 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
10669 RegionHasStackSave = true;
10670 }
10671 if (NextLoadStore) {
10672 if (CurrentLoadStore)
10673 CurrentLoadStore->NextLoadStore = NextLoadStore;
10674 } else {
10675 LastLoadStoreInRegion = CurrentLoadStore;
10676 }
10677}
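
// Illustrative sketch (simplified, hypothetical types; not part of
// SLPVectorizer.cpp): initScheduleData threads every memory-accessing
// instruction in the region onto one singly linked list, so the dependency
// scan in calculateDependencies never has to re-filter non-memory instructions:

struct MemNode {
  bool AccessesMemory = false;
  MemNode *NextLoadStore = nullptr;
};

// Links all memory-accessing nodes in Nodes[0..Count) behind Prev and returns
// the last one seen (or Prev when none accesses memory).
inline MemNode *threadLoadStores(MemNode *Nodes, int Count, MemNode *Prev) {
  MemNode *Cur = Prev;
  for (int I = 0; I < Count; ++I) {
    if (!Nodes[I].AccessesMemory)
      continue;
    if (Cur)
      Cur->NextLoadStore = &Nodes[I]; // like CurrentLoadStore->NextLoadStore = SD
    Cur = &Nodes[I];
  }
  return Cur;
}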
10678
10679void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
10680 bool InsertInReadyList,
10681 BoUpSLP *SLP) {
10682 assert(SD->isSchedulingEntity());
10683
10684 SmallVector<ScheduleData *, 10> WorkList;
10685 WorkList.push_back(SD);
10686
10687 while (!WorkList.empty()) {
10688 ScheduleData *SD = WorkList.pop_back_val();
10689 for (ScheduleData *BundleMember = SD; BundleMember;
10690 BundleMember = BundleMember->NextInBundle) {
10691 assert(isInSchedulingRegion(BundleMember));
10692 if (BundleMember->hasValidDependencies())
10693 continue;
10694
10695 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
10696 << "\n");
10697 BundleMember->Dependencies = 0;
10698 BundleMember->resetUnscheduledDeps();
10699
10700 // Handle def-use chain dependencies.
10701 if (BundleMember->OpValue != BundleMember->Inst) {
10702 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
10703 BundleMember->Dependencies++;
10704 ScheduleData *DestBundle = UseSD->FirstInBundle;
10705 if (!DestBundle->IsScheduled)
10706 BundleMember->incrementUnscheduledDeps(1);
10707 if (!DestBundle->hasValidDependencies())
10708 WorkList.push_back(DestBundle);
10709 }
10710 } else {
10711 for (User *U : BundleMember->Inst->users()) {
10712 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
10713 BundleMember->Dependencies++;
10714 ScheduleData *DestBundle = UseSD->FirstInBundle;
10715 if (!DestBundle->IsScheduled)
10716 BundleMember->incrementUnscheduledDeps(1);
10717 if (!DestBundle->hasValidDependencies())
10718 WorkList.push_back(DestBundle);
10719 }
10720 }
10721 }
10722
10723 auto makeControlDependent = [&](Instruction *I) {
10724 auto *DepDest = getScheduleData(I);
10725 assert(DepDest && "must be in schedule window");
10726 DepDest->ControlDependencies.push_back(BundleMember);
10727 BundleMember->Dependencies++;
10728 ScheduleData *DestBundle = DepDest->FirstInBundle;
10729 if (!DestBundle->IsScheduled)
10730 BundleMember->incrementUnscheduledDeps(1);
10731 if (!DestBundle->hasValidDependencies())
10732 WorkList.push_back(DestBundle);
10733 };
10734
10735 // Any instruction which isn't safe to speculate at the beginning of the
10736 // block is control dependent on any early exit or non-willreturn call
10737 // which precedes it.
10738 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
10739 for (Instruction *I = BundleMember->Inst->getNextNode();
10740 I != ScheduleEnd; I = I->getNextNode()) {
10741 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
10742 continue;
10743
10744 // Add the dependency
10745 makeControlDependent(I);
10746
10747 if (!isGuaranteedToTransferExecutionToSuccessor(I))
10748 // Everything past here must be control dependent on I.
10749 break;
10750 }
10751 }
10752
10753 if (RegionHasStackSave) {
10754 // If we have an inalloca alloca instruction, it needs to be scheduled
10755 // after any preceding stacksave. We also need to prevent any alloca
10756 // from reordering above a preceding stackrestore.
10757 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
10758 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
10759 for (Instruction *I = BundleMember->Inst->getNextNode();
10760 I != ScheduleEnd; I = I->getNextNode()) {
10761 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
10762 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
10763 // Any allocas past here must be control dependent on I, and I
10764 // must be memory dependent on BundleMember->Inst.
10765 break;
10766
10767 if (!isa<AllocaInst>(I))
10768 continue;
10769
10770 // Add the dependency
10771 makeControlDependent(I);
10772 }
10773 }
10774
10775 // In addition to the cases handled just above, we need to prevent
10776 // allocas and loads/stores from moving below a stacksave or a
10777 // stackrestore. Preventing allocas from moving below a stackrestore is
10778 // currently believed to be mere conservatism. Moving loads/stores below a
10779 // stackrestore, however, can lead to incorrect code.
10780 if (isa<AllocaInst>(BundleMember->Inst) ||
10781 BundleMember->Inst->mayReadOrWriteMemory()) {
10782 for (Instruction *I = BundleMember->Inst->getNextNode();
10783 I != ScheduleEnd; I = I->getNextNode()) {
10784 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
10785 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
10786 continue;
10787
10788 // Add the dependency
10789 makeControlDependent(I);
10790 break;
10791 }
10792 }
10793 }
10794
10795 // Handle the memory dependencies (if any).
10796 ScheduleData *DepDest = BundleMember->NextLoadStore;
10797 if (!DepDest)
10798 continue;
10799 Instruction *SrcInst = BundleMember->Inst;
10800 assert(SrcInst->mayReadOrWriteMemory() &&
10801 "NextLoadStore list for non memory effecting bundle?");
10802 MemoryLocation SrcLoc = getLocation(SrcInst);
10803 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
10804 unsigned numAliased = 0;
10805 unsigned DistToSrc = 1;
10806
10807 for ( ; DepDest; DepDest = DepDest->NextLoadStore) {
10808 assert(isInSchedulingRegion(DepDest));
10809
10810 // We have two limits to reduce the complexity:
10811 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
10812 // SLP->isAliased (which is the expensive part in this loop).
10813 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
10814 // the whole loop (even if the loop is fast, it's quadratic).
10815 // It's important for the loop break condition (see below) to
10816 // check this limit even between two read-only instructions.
10817 if (DistToSrc >= MaxMemDepDistance ||
10818 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
10819 (numAliased >= AliasedCheckLimit ||
10820 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
10821
10822 // We increment the counter only if the locations are aliased
10823 // (instead of counting all alias checks). This gives a better
10824 // balance between reduced runtime and accurate dependencies.
10825 numAliased++;
10826
10827 DepDest->MemoryDependencies.push_back(BundleMember);
10828 BundleMember->Dependencies++;
10829 ScheduleData *DestBundle = DepDest->FirstInBundle;
10830 if (!DestBundle->IsScheduled) {
10831 BundleMember->incrementUnscheduledDeps(1);
10832 }
10833 if (!DestBundle->hasValidDependencies()) {
10834 WorkList.push_back(DestBundle);
10835 }
10836 }
10837
10838 // Example, explaining the loop break condition: Let's assume our
10839 // starting instruction is i0 and MaxMemDepDistance = 3.
10840 //
10841 // +--------v--v--v
10842 // i0,i1,i2,i3,i4,i5,i6,i7,i8
10843 // +--------^--^--^
10844 //
10845 // MaxMemDepDistance let us stop alias-checking at i3 and we add
10846 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
10847 // Previously we already added dependencies from i3 to i6,i7,i8
10848 // (because of MaxMemDepDistance). As we added a dependency from
10849 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
10850 // and we can abort this loop at i6.
10851 if (DistToSrc >= 2 * MaxMemDepDistance)
10852 break;
10853 DistToSrc++;
10854 }
10855 }
10856 if (InsertInReadyList && SD->isReady()) {
10857 ReadyInsts.insert(SD);
10858 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
10859 << "\n");
10860 }
10861 }
10862}
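
// Illustrative sketch (simplified, hypothetical code; not part of
// SLPVectorizer.cpp): the memory-dependency loop above bounds its cost with
// two caps. Past AliasedCheckLimit it stops paying for alias queries and
// conservatively records dependencies; past 2 * MaxMemDepDistance it relies
// on the transitive edges it already added and aborts. In outline:

inline bool toyMayAlias(int A, int B) { return A == B; } // toy stand-in query

inline void scanMemDeps(const int *LoadStores, int N, int Src,
                        unsigned AliasedCheckLimit, unsigned MaxMemDepDistance) {
  unsigned NumAliased = 0, DistToSrc = 1;
  for (int I = Src + 1; I < N; ++I) {
    // Past either limit, skip the (expensive) query and record a dependency.
    if (DistToSrc >= MaxMemDepDistance || NumAliased >= AliasedCheckLimit ||
        toyMayAlias(LoadStores[Src], LoadStores[I])) {
      ++NumAliased;
      // ... record dependency Src -> I here ...
    }
    // Anything further than 2 * MaxMemDepDistance is already reachable through
    // a dependency added from an intermediate node, so stop.
    if (DistToSrc >= 2 * MaxMemDepDistance)
      break;
    ++DistToSrc;
  }
}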
10863
10864void BoUpSLP::BlockScheduling::resetSchedule() {
10865 assert(ScheduleStart &&
10866 "tried to reset schedule on block which has not been scheduled");
10867 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
10868 doForAllOpcodes(I, [&](ScheduleData *SD) {
10869 assert(isInSchedulingRegion(SD) &&
10870 "ScheduleData not in scheduling region");
10871 SD->IsScheduled = false;
10872 SD->resetUnscheduledDeps();
10873 });
10874 }
10875 ReadyInsts.clear();
10876}
10877
10878void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
10879 if (!BS->ScheduleStart)
10880 return;
10881
10882 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
10883
10884 // A key point - if we got here, pre-scheduling was able to find a valid
10885 // scheduling of the sub-graph of the scheduling window which consists
10886 // of all vector bundles and their transitive users. As such, we do not
10887 // need to reschedule anything *outside of* that subgraph.
10888
10889 BS->resetSchedule();
10890
10891 // For the real scheduling we use a more sophisticated ready-list: it is
10892 // sorted by the original instruction location. This lets the final schedule
10893 // be as close as possible to the original instruction order.
10894 // WARNING: If changing this order causes a correctness issue, that means
10895 // there is some missing dependence edge in the schedule data graph.
10896 struct ScheduleDataCompare {
10897 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
10898 return SD2->SchedulingPriority < SD1->SchedulingPriority;
10899 }
10900 };
10901 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
10902
10903 // Ensure that all dependency data is updated (for nodes in the sub-graph)
10904 // and fill the ready-list with initial instructions.
10905 int Idx = 0;
10906 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
10907 I = I->getNextNode()) {
10908 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) {
10909 TreeEntry *SDTE = getTreeEntry(SD->Inst);
10910 (void)SDTE;
10911 assert((isVectorLikeInstWithConstOps(SD->Inst) ||
10912 SD->isPartOfBundle() ==
10913 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) &&
10914 "scheduler and vectorizer bundle mismatch");
10915 SD->FirstInBundle->SchedulingPriority = Idx++;
10916
10917 if (SD->isSchedulingEntity() && SD->isPartOfBundle())
10918 BS->calculateDependencies(SD, false, this);
10919 });
10920 }
10921 BS->initialFillReadyList(ReadyInsts);
10922
10923 Instruction *LastScheduledInst = BS->ScheduleEnd;
10924
10925 // Do the "real" scheduling.
10926 while (!ReadyInsts.empty()) {
10927 ScheduleData *picked = *ReadyInsts.begin();
10928 ReadyInsts.erase(ReadyInsts.begin());
10929
10930 // Move the scheduled instruction(s) to their dedicated places, if not
10931 // there yet.
10932 for (ScheduleData *BundleMember = picked; BundleMember;
10933 BundleMember = BundleMember->NextInBundle) {
10934 Instruction *pickedInst = BundleMember->Inst;
10935 if (pickedInst->getNextNode() != LastScheduledInst)
10936 pickedInst->moveBefore(LastScheduledInst);
10937 LastScheduledInst = pickedInst;
10938 }
10939
10940 BS->schedule(picked, ReadyInsts);
10941 }
10942
10943 // Check that we didn't break any of our invariants.
10944#ifdef EXPENSIVE_CHECKS
10945 BS->verify();
10946#endif
10947
10948#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
10949 // Check that all schedulable entities got scheduled
10950 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) {
10951 BS->doForAllOpcodes(I, [&](ScheduleData *SD) {
10952 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) {
10953 assert(SD->IsScheduled && "must be scheduled at this point");
10954 }
10955 });
10956 }
10957#endif
10958
10959 // Avoid duplicate scheduling of the block.
10960 BS->ScheduleStart = nullptr;
10961}
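
// Illustrative sketch (simplified, hypothetical types; not part of
// SLPVectorizer.cpp): the loop above is classic list scheduling. Keep a ready
// set ordered by priority, repeatedly pop the best entry, place it, and
// release users whose last dependency just got scheduled. (The pass itself
// schedules bottom-up and pops the largest SchedulingPriority first; this toy
// schedules top-down for brevity.)

#include <set>
#include <vector>

struct SchedItem {
  int Priority;            // original position in the block
  int UnscheduledDeps = 0; // predecessors not yet scheduled
  std::vector<SchedItem *> Users;
};

struct ByPriority {
  bool operator()(const SchedItem *A, const SchedItem *B) const {
    return A->Priority < B->Priority; // stay close to the original order
  }
};

inline std::vector<SchedItem *> listSchedule(std::vector<SchedItem *> Ready) {
  std::set<SchedItem *, ByPriority> ReadySet(Ready.begin(), Ready.end());
  std::vector<SchedItem *> Order;
  while (!ReadySet.empty()) {
    SchedItem *Picked = *ReadySet.begin();
    ReadySet.erase(ReadySet.begin());
    Order.push_back(Picked);
    for (SchedItem *U : Picked->Users)
      if (--U->UnscheduledDeps == 0)
        ReadySet.insert(U); // U just became ready
  }
  return Order;
}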
10962
10963unsigned BoUpSLP::getVectorElementSize(Value *V) {
10964 // If V is a store, just return the width of the stored value (or value
10965 // truncated just before storing) without traversing the expression tree.
10966 // This is the common case.
10967 if (auto *Store = dyn_cast<StoreInst>(V))
10968 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
10969
10970 if (auto *IEI = dyn_cast<InsertElementInst>(V))
10971 return getVectorElementSize(IEI->getOperand(1));
10972
10973 auto E = InstrElementSize.find(V);
10974 if (E != InstrElementSize.end())
10975 return E->second;
10976
10977 // If V is not a store, we can traverse the expression tree to find loads
10978 // that feed it. The type of the loaded value may indicate a more suitable
10979 // width than V's type. We want to base the vector element size on the width
10980 // of memory operations where possible.
10981 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
10982 SmallPtrSet<Instruction *, 16> Visited;
10983 if (auto *I = dyn_cast<Instruction>(V)) {
10984 Worklist.emplace_back(I, I->getParent());
10985 Visited.insert(I);
10986 }
10987
10988 // Traverse the expression tree in bottom-up order looking for loads. If we
10989 // encounter an instruction we don't yet handle, we give up.
10990 auto Width = 0u;
10991 while (!Worklist.empty()) {
10992 Instruction *I;
10993 BasicBlock *Parent;
10994 std::tie(I, Parent) = Worklist.pop_back_val();
10995
10996 // We should only be looking at scalar instructions here. If the current
10997 // instruction has a vector type, skip.
10998 auto *Ty = I->getType();
10999 if (isa<VectorType>(Ty))
11000 continue;
11001
11002 // If the current instruction is a load (or an extractelement/extractvalue),
11003 // update Width to reflect the width of the loaded value.
11004 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I))
11005 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
11006
11007 // Otherwise, we need to visit the operands of the instruction. We only
11008 // handle the interesting cases from buildTree here. If an operand is an
11009 // instruction we haven't yet visited, and either the user is a PHI node or
11010 // the operand is in the same basic block as the user, we add it to the worklist.
11011 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst,
11012 BinaryOperator, UnaryOperator>(I)) {
11013 for (Use &U : I->operands())
11014 if (auto *J = dyn_cast<Instruction>(U.get()))
11015 if (Visited.insert(J).second &&
11016 (isa<PHINode>(I) || J->getParent() == Parent))
11017 Worklist.emplace_back(J, J->getParent());
11018 } else {
11019 break;
11020 }
11021 }
11022
11023 // If we didn't encounter a memory access in the expression tree, or if we
11024 // gave up for some reason, just return the width of V. Otherwise, return the
11025 // maximum width we found.
11026 if (!Width) {
11027 if (auto *CI = dyn_cast<CmpInst>(V))
11028 V = CI->getOperand(0);
11029 Width = DL->getTypeSizeInBits(V->getType());
11030 }
11031
11032 for (Instruction *I : Visited)
11033 InstrElementSize[I] = Width;
11034
11035 return Width;
11036}
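
// Illustrative sketch (simplified, hypothetical types; not part of
// SLPVectorizer.cpp): getVectorElementSize is a worklist walk over operands
// that keeps the widest memory-access width it sees and falls back to the
// root's own width when no load is found:

#include <algorithm>
#include <unordered_set>
#include <vector>

struct ExprNode {
  bool IsLoad = false;
  unsigned Bits = 0; // width of the loaded value when IsLoad
  std::vector<ExprNode *> Operands;
};

inline unsigned maxLoadWidth(ExprNode *Root, unsigned Fallback) {
  std::vector<ExprNode *> Worklist{Root};
  std::unordered_set<ExprNode *> Visited{Root};
  unsigned Width = 0;
  while (!Worklist.empty()) {
    ExprNode *E = Worklist.back();
    Worklist.pop_back();
    if (E->IsLoad)
      Width = std::max(Width, E->Bits);
    for (ExprNode *Op : E->Operands)
      if (Visited.insert(Op).second)
        Worklist.push_back(Op);
  }
  return Width ? Width : Fallback; // no load found: use the root's own width
}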
11037
11038// Determine if a value V in a vectorizable expression Expr can be demoted to a
11039// smaller type with a truncation. We collect the values that will be demoted
11040// in ToDemote and additional roots that require investigating in Roots.
11041static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
11042 SmallVectorImpl<Value *> &ToDemote,
11043 SmallVectorImpl<Value *> &Roots) {
11044 // We can always demote constants.
11045 if (isa<Constant>(V)) {
11046 ToDemote.push_back(V);
11047 return true;
11048 }
11049
11050 // If the value is not an instruction in the expression with only one use, it
11051 // cannot be demoted.
11052 auto *I = dyn_cast<Instruction>(V);
11053 if (!I || !I->hasOneUse() || !Expr.count(I))
11054 return false;
11055
11056 switch (I->getOpcode()) {
11057
11058 // We can always demote truncations and extensions. Since truncations can
11059 // seed additional demotion, we save the truncated value.
11060 case Instruction::Trunc:
11061 Roots.push_back(I->getOperand(0));
11062 break;
11063 case Instruction::ZExt:
11064 case Instruction::SExt:
11065 if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0)))
11066 return false;
11067 break;
11068
11069 // We can demote certain binary operations if we can demote both of their
11070 // operands.
11071 case Instruction::Add:
11072 case Instruction::Sub:
11073 case Instruction::Mul:
11074 case Instruction::And:
11075 case Instruction::Or:
11076 case Instruction::Xor:
11077 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
11078 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
11079 return false;
11080 break;
11081
11082 // We can demote selects if we can demote their true and false values.
11083 case Instruction::Select: {
11084 SelectInst *SI = cast<SelectInst>(I);
11085 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
11086 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
11087 return false;
11088 break;
11089 }
11090
11091 // We can demote phis if we can demote all their incoming operands. Note that
11092 // we don't need to worry about cycles since we ensure single use above.
11093 case Instruction::PHI: {
11094 PHINode *PN = cast<PHINode>(I);
11095 for (Value *IncValue : PN->incoming_values())
11096 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
11097 return false;
11098 break;
11099 }
11100
11101 // Otherwise, conservatively give up.
11102 default:
11103 return false;
11104 }
11105
11106 // Record the value that we can demote.
11107 ToDemote.push_back(V);
11108 return true;
11109}
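
// A worked example (hypothetical IR, simplified): collectValuesToDemote
// succeeds only when every node between the root and demotable leaves
// (constants, truncations, extensions) is itself demotable, e.g.
//
//   %t0 = trunc i32 %x to i8   ; trunc leaf: seeds %x as an additional root
//   %e0 = zext i8 %t0 to i32
//   %e1 = zext i8 %c to i32
//   %r  = add i32 %e0, %e1     ; demotable: both operands demote
//
// Here the whole chain can be recomputed in i8, with a single extension of %r
// back to i32 at the end.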
11110
11111void BoUpSLP::computeMinimumValueSizes() {
11112 // If there are no external uses, the expression tree must be rooted by a
11113 // store. We can't demote in-memory values, so there is nothing to do here.
11114 if (ExternalUses.empty())
11115 return;
11116
11117 // We only attempt to truncate integer expressions.
11118 auto &TreeRoot = VectorizableTree[0]->Scalars;
11119 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
11120 if (!TreeRootIT)
11121 return;
11122
11123 // If the expression is not rooted by a store, these roots should have
11124 // external uses. We will rely on InstCombine to rewrite the expression in
11125 // the narrower type. However, InstCombine only rewrites single-use values.
11126 // This means that if a tree entry other than a root is used externally, it
11127 // must have multiple uses and InstCombine will not rewrite it. The code
11128 // below ensures that only the roots are used externally.
11129 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
11130 for (auto &EU : ExternalUses)
11131 if (!Expr.erase(EU.Scalar))
11132 return;
11133 if (!Expr.empty())
11134 return;
11135
11136 // Collect the scalar values of the vectorizable expression. We will use this
11137 // context to determine which values can be demoted. If we see a truncation,
11138 // we mark it as seeding another demotion.
11139 for (auto &EntryPtr : VectorizableTree)
11140 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
11141
11142 // Ensure the roots of the vectorizable tree don't form a cycle. They must
11143 // have a single external user that is not in the vectorizable tree.
11144 for (auto *Root : TreeRoot)
11145 if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
11146 return;
11147
11148 // Conservatively determine if we can actually truncate the roots of the
11149 // expression. Collect the values that can be demoted in ToDemote and
11150 // additional roots that require investigating in Roots.
11151 SmallVector<Value *, 32> ToDemote;
11152 SmallVector<Value *, 4> Roots;
11153 for (auto *Root : TreeRoot)
11154 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
11155 return;
11156
11157 // The maximum bit width required to represent all the values that can be
11158 // demoted without loss of precision. It would be safe to truncate the roots
11159 // of the expression to this width.
11160 auto MaxBitWidth = 8u;
11161
11162 // We first check if all the bits of the roots are demanded. If they're not,
11163 // we can truncate the roots to this narrower type.
11164 for (auto *Root : TreeRoot) {
11165 auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
11166 MaxBitWidth = std::max<unsigned>(
11167 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
11168 }
11169
11170 // True if the roots can be zero-extended back to their original type, rather
11171 // than sign-extended. We know that if the leading bits are not demanded, we
11172 // can safely zero-extend. So we initialize IsKnownPositive to True.
11173 bool IsKnownPositive = true;
11174
11175 // If all the bits of the roots are demanded, we can try a little harder to
11176 // compute a narrower type. This can happen, for example, if the roots are
11177 // getelementptr indices. InstCombine promotes these indices to the pointer
11178 // width. Thus, all their bits are technically demanded even though the
11179 // address computation might be vectorized in a smaller type.
11180 //
11181 // We start by looking at each entry that can be demoted. We compute the
11182 // maximum bit width required to store the scalar by using ValueTracking to
11183 // compute the number of high-order bits we can truncate.
11184 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
11185 llvm::all_of(TreeRoot, [](Value *R) {
11186 assert(R->hasOneUse() && "Root should have only one use!");
11187 return isa<GetElementPtrInst>(R->user_back());
11188 })) {
11189 MaxBitWidth = 8u;
11190
11191 // Determine if the sign bit of all the roots is known to be zero. If not,
11192 // IsKnownPositive is set to False.
11193 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
11194 KnownBits Known = computeKnownBits(R, *DL);
11195 return Known.isNonNegative();
11196 });
11197
11198 // Determine the maximum number of bits required to store the scalar
11199 // values.
11200 for (auto *Scalar : ToDemote) {
11201 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
11202 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
11203 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
11204 }
11205
11206 // If we can't prove that the sign bit is zero, we must add one to the
11207 // maximum bit width to account for the unknown sign bit. This preserves
11208 // the existing sign bit so we can safely sign-extend the root back to the
11209 // original type. Otherwise, if we know the sign bit is zero, we will
11210 // zero-extend the root instead.
11211 //
11212 // FIXME: This is somewhat suboptimal, as there will be cases where adding
11213 // one to the maximum bit width will yield a larger-than-necessary
11214 // type. In general, we need to add an extra bit only if we can't
11215 // prove that the upper bit of the original type is equal to the
11216 // upper bit of the proposed smaller type. If these two bits are the
11217 // same (either zero or one) we know that sign-extending from the
11218 // smaller type will result in the same value. Here, since we can't
11219 // yet prove this, we are just making the proposed smaller type
11220 // larger to ensure correctness.
11221 if (!IsKnownPositive)
11222 ++MaxBitWidth;
11223 }
11224
11225 // Round MaxBitWidth up to the next power-of-two.
11226 if (!isPowerOf2_64(MaxBitWidth))
11227 MaxBitWidth = NextPowerOf2(MaxBitWidth);
11228
11229 // If the maximum bit width we compute is less than the width of the roots'
11230 // type, we can proceed with the narrowing. Otherwise, do nothing.
11231 if (MaxBitWidth >= TreeRootIT->getBitWidth())
11232 return;
11233
11234 // If we can truncate the root, we must collect additional values that might
11235 // be demoted as a result. That is, those seeded by truncations we will
11236 // modify.
11237 while (!Roots.empty())
11238 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
11239
11240 // Finally, map the values we can demote to the maximum bit width we computed.
11241 for (auto *Scalar : ToDemote)
11242 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
11243}
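
// Illustrative sketch (simplified, hypothetical code; not part of
// SLPVectorizer.cpp): the width computation above boils down to taking the
// largest "bits actually needed" over the relevant scalars, adding one bit
// when the sign is unknown (so sign-extension stays correct), and rounding up
// to a power of two:

inline unsigned minimumBitWidth(const unsigned *NeededBits, int N,
                                bool IsKnownPositive) {
  unsigned MaxBitWidth = 8; // never narrow below i8
  for (int I = 0; I < N; ++I)
    if (NeededBits[I] > MaxBitWidth)
      MaxBitWidth = NeededBits[I];
  if (!IsKnownPositive)
    ++MaxBitWidth; // preserve a copy of the unknown sign bit for sext
  unsigned W = 1; // round up, like llvm::NextPowerOf2
  while (W < MaxBitWidth)
    W *= 2;
  return W;
}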
11244
11245namespace {
11246
11247/// The SLPVectorizer Pass.
11248struct SLPVectorizer : public FunctionPass {
11249 SLPVectorizerPass Impl;
11250
11251 /// Pass identification, replacement for typeid
11252 static char ID;
11253
11254 explicit SLPVectorizer() : FunctionPass(ID) {
11255 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
11256 }
11257
11258 bool doInitialization(Module &M) override { return false; }
11259
11260 bool runOnFunction(Function &F) override {
11261 if (skipFunction(F))
11262 return false;
11263
11264 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
11265 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
11266 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
11267 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
11268 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
11269 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
11270 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
11271 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
11272 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
11273 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
11274
11275 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
11276 }
11277
11278 void getAnalysisUsage(AnalysisUsage &AU) const override {
11279 FunctionPass::getAnalysisUsage(AU);
11280 AU.addRequired<AssumptionCacheTracker>();
11281 AU.addRequired<ScalarEvolutionWrapperPass>();
11282 AU.addRequired<AAResultsWrapperPass>();
11283 AU.addRequired<TargetTransformInfoWrapperPass>();
11284 AU.addRequired<LoopInfoWrapperPass>();
11285 AU.addRequired<DominatorTreeWrapperPass>();
11286 AU.addRequired<DemandedBitsWrapperPass>();
11287 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
11288 AU.addRequired<InjectTLIMappingsLegacy>();
11289 AU.addPreserved<LoopInfoWrapperPass>();
11290 AU.addPreserved<DominatorTreeWrapperPass>();
11291 AU.addPreserved<AAResultsWrapperPass>();
11292 AU.addPreserved<GlobalsAAWrapperPass>();
11293 AU.setPreservesCFG();
11294 }
11295};
11296
11297} // end anonymous namespace
11298
11299PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
11300 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
11301 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
11302 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
11303 auto *AA = &AM.getResult<AAManager>(F);
11304 auto *LI = &AM.getResult<LoopAnalysis>(F);
11305 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
11306 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
11307 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
11308 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
11309
11310 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
11311 if (!Changed)
11312 return PreservedAnalyses::all();
11313
11314 PreservedAnalyses PA;
11315 PA.preserveSet<CFGAnalyses>();
11316 return PA;
11317}
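
// A minimal way to exercise the pass through the new pass manager (standard
// opt spelling; file names are placeholders):
//
//   opt -passes=slp-vectorizer -S input.ll -o output.ll
//
// The PreservedAnalyses returned above tells the manager that the CFG itself
// was left intact, so CFG-only analyses stay valid even when the function
// changed.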
11318
11319bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
11320 TargetTransformInfo *TTI_,
11321 TargetLibraryInfo *TLI_, AAResults *AA_,
11322 LoopInfo *LI_, DominatorTree *DT_,
11323 AssumptionCache *AC_, DemandedBits *DB_,
11324 OptimizationRemarkEmitter *ORE_) {
11325 if (!RunSLPVectorization)
11326 return false;
11327 SE = SE_;
11328 TTI = TTI_;
11329 TLI = TLI_;
11330 AA = AA_;
11331 LI = LI_;
11332 DT = DT_;
11333 AC = AC_;
11334 DB = DB_;
11335 DL = &F.getParent()->getDataLayout();
11336
11337 Stores.clear();
11338 GEPs.clear();
11339 bool Changed = false;
11340
11341 // If the target claims to have no vector registers don't attempt
11342 // vectorization.
11343 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
11344 LLVM_DEBUG(
11345 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
11346 return false;
11347 }
11348
11349 // Don't vectorize when the attribute NoImplicitFloat is used.
11350 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
11351 return false;
11352
11353 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
11354
11355 // Use the bottom up slp vectorizer to construct chains that start with
11356 // store instructions.
11357 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
11358
11359 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
11360 // delete instructions.
11361
11362 // Update DFS numbers now so that we can use them for ordering.
11363 DT->updateDFSNumbers();
11364
11365 // Scan the blocks in the function in post order.
11366 for (auto *BB : post_order(&F.getEntryBlock())) {
11367 // Start new block - clear the list of reduction roots.
11368 R.clearReductionData();
11369 collectSeedInstructions(BB);
11370
11371 // Vectorize trees that end at stores.
11372 if (!Stores.empty()) {
11373 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
11374 << " underlying objects.\n");
11375 Changed |= vectorizeStoreChains(R);
11376 }
11377
11378 // Vectorize trees that end at reductions.
11379 Changed |= vectorizeChainsInBlock(BB, R);
11380
11381 // Vectorize the index computations of getelementptr instructions. This
11382 // is primarily intended to catch gather-like idioms ending at
11383 // non-consecutive loads.
11384 if (!GEPs.empty()) {
11385 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
11386 << " underlying objects.\n");
11387 Changed |= vectorizeGEPIndices(BB, R);
11388 }
11389 }
11390
11391 if (Changed) {
11392 R.optimizeGatherSequence();
11393 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
11394 }
11395 return Changed;
11396}
11397
11398bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
11399 unsigned Idx, unsigned MinVF) {
11400 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
11401 << "\n");
11402 const unsigned Sz = R.getVectorElementSize(Chain[0]);
11403 unsigned VF = Chain.size();
11404
11405 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
11406 return false;
11407
11408 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
11409 << "\n");
11410
11411 R.buildTree(Chain);
11412 if (R.isTreeTinyAndNotFullyVectorizable())
11413 return false;
11414 if (R.isLoadCombineCandidate())
11415 return false;
11416 R.reorderTopToBottom();
11417 R.reorderBottomToTop();
11418 R.buildExternalUses();
11419
11420 R.computeMinimumValueSizes();
11421
11422 InstructionCost Cost = R.getTreeCost();
11423
11424 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n");
11425 if (Cost < -SLPCostThreshold) {
11426 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
11427
11428 using namespace ore;
11429
11430 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
11431 cast<StoreInst>(Chain[0]))
11432 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
11433 << " and with tree size "
11434 << NV("TreeSize", R.getTreeSize()));
11435
11436 R.vectorizeTree();
11437 return true;
11438 }
11439
11440 return false;
11441}
11442
11443bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
11444 BoUpSLP &R) {
11445 // We may run into multiple chains that merge into a single chain. We mark the
11446 // stores that we vectorized so that we don't visit the same store twice.
11447 BoUpSLP::ValueSet VectorizedStores;
11448 bool Changed = false;
11449
11450 int E = Stores.size();
11451 SmallBitVector Tails(E, false);
11452 int MaxIter = MaxStoreLookup.getValue();
11453 SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
11454 E, std::make_pair(E, INT_MAX));
11455 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
11456 int IterCnt;
11457 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
11458 &CheckedPairs,
11459 &ConsecutiveChain](int K, int Idx) {
11460 if (IterCnt >= MaxIter)
11461 return true;
11462 if (CheckedPairs[Idx].test(K))
11463 return ConsecutiveChain[K].second == 1 &&
11464 ConsecutiveChain[K].first == Idx;
11465 ++IterCnt;
11466 CheckedPairs[Idx].set(K);
11467 CheckedPairs[K].set(Idx);
11468 std::optional<int> Diff = getPointersDiff(
11469 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
11470 Stores[Idx]->getValueOperand()->getType(),
11471 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
11472 if (!Diff || *Diff == 0)
11473 return false;
11474 int Val = *Diff;
11475 if (Val < 0) {
11476 if (ConsecutiveChain[Idx].second > -Val) {
11477 Tails.set(K);
11478 ConsecutiveChain[Idx] = std::make_pair(K, -Val);
11479 }
11480 return false;
11481 }
11482 if (ConsecutiveChain[K].second <= Val)
11483 return false;
11484
11485 Tails.set(Idx);
11486 ConsecutiveChain[K] = std::make_pair(Idx, Val);
11487 return Val == 1;
11488 };
11489 // Do a quadratic search on all of the given stores in reverse order and find
11490 // all of the pairs of stores that follow each other.
11491 for (int Idx = E - 1; Idx >= 0; --Idx) {
11492 // If a store has multiple consecutive store candidates, search according
11493 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
11494 // This is because pairing with the immediately succeeding or preceding
11495 // candidate usually creates the best chance of finding an SLP vectorization opportunity.
11496 const int MaxLookDepth = std::max(E - Idx, Idx + 1);
11497 IterCnt = 0;
11498 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
11499 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
11500 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
11501 break;
11502 }
11503
11504 // Tracks whether we already tried to vectorize stores starting from the
11505 // given tail.
11506 SmallBitVector TriedTails(E, false);
11507 // For stores that start but don't end a link in the chain:
11508 for (int Cnt = E; Cnt > 0; --Cnt) {
11509 int I = Cnt - 1;
11510 if (ConsecutiveChain[I].first == E || Tails.test(I))
11511 continue;
11512 // We found a store instr that starts a chain. Now follow the chain and try
11513 // to vectorize it.
11514 BoUpSLP::ValueList Operands;
11515 // Collect the chain into a list.
11516 while (I != E && !VectorizedStores.count(Stores[I])) {
11517 Operands.push_back(Stores[I]);
11518 Tails.set(I);
11519 if (ConsecutiveChain[I].second != 1) {
11520 // Mark the new end in the chain and go back, if required. It might be
11521 // required if the original stores come in reversed order, for example.
11522 if (ConsecutiveChain[I].first != E &&
11523 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
11524 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
11525 TriedTails.set(I);
11526 Tails.reset(ConsecutiveChain[I].first);
11527 if (Cnt < ConsecutiveChain[I].first + 2)
11528 Cnt = ConsecutiveChain[I].first + 2;
11529 }
11530 break;
11531 }
11532 // Move to the next value in the chain.
11533 I = ConsecutiveChain[I].first;
11534 }
11535 assert(!Operands.empty() && "Expected non-empty list of stores.");
11536
11537 unsigned MaxVecRegSize = R.getMaxVecRegSize();
11538 unsigned EltSize = R.getVectorElementSize(Operands[0]);
11539 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
11540
11541 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
11542 MaxElts);
11543 auto *Store = cast<StoreInst>(Operands[0]);
11544 Type *StoreTy = Store->getValueOperand()->getType();
11545 Type *ValueTy = StoreTy;
11546 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
11547 ValueTy = Trunc->getSrcTy();
11548 unsigned MinVF = TTI->getStoreMinimumVF(
11549 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy);
11550
11551 if (MaxVF <= MinVF) {
11552 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF << ") <= "
11553 << "MinVF (" << MinVF << ")\n");
11554 }
11555
11556 // FIXME: Is division-by-2 the correct step? Should we assert that the
11557 // register size is a power-of-2?
11558 unsigned StartIdx = 0;
11559 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
11560 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
11561 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size);
11562 if (!VectorizedStores.count(Slice.front()) &&
11563 !VectorizedStores.count(Slice.back()) &&
11564 vectorizeStoreChain(Slice, R, Cnt, MinVF)) {
11565 // Mark the vectorized stores so that we don't vectorize them again.
11566 VectorizedStores.insert(Slice.begin(), Slice.end());
11567 Changed = true;
11568 // If we vectorized the initial block, there is no need to try to
11569 // vectorize it again.
11570 if (Cnt == StartIdx)
11571 StartIdx += Size;
11572 Cnt += Size;
11573 continue;
11574 }
11575 ++Cnt;
11576 }
11577 // Check if the whole array was vectorized already - exit.
11578 if (StartIdx >= Operands.size())
11579 break;
11580 }
11581 }
11582
11583 return Changed;
11584}
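
// Illustrative sketch (simplified, hypothetical code; not part of
// SLPVectorizer.cpp): the pairing loop above probes neighbors in the order
// Idx-1, Idx+1, Idx-2, Idx+2, ... because a store's consecutive partner is
// usually emitted right next to it. The probe order in isolation:

#include <vector>

// Returns candidate indices around Idx (0 <= Idx < E) in nearest-first order.
inline std::vector<int> zigZagOrder(int Idx, int E) {
  std::vector<int> Order;
  for (int Offset = 1; Idx - Offset >= 0 || Idx + Offset < E; ++Offset) {
    if (Idx - Offset >= 0)
      Order.push_back(Idx - Offset);
    if (Idx + Offset < E)
      Order.push_back(Idx + Offset);
  }
  return Order;
}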
11585
11586void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
11587 // Initialize the collections. We will make a single pass over the block.
11588 Stores.clear();
11589 GEPs.clear();
11590
11591 // Visit the store and getelementptr instructions in BB and organize them in
11592 // Stores and GEPs according to the underlying objects of their pointer
11593 // operands.
11594 for (Instruction &I : *BB) {
11595 // Ignore store instructions that are volatile or have a pointer operand
11596 // that doesn't point to a scalar type.
11597 if (auto *SI = dyn_cast<StoreInst>(&I)) {
11598 if (!SI->isSimple())
11599 continue;
11600 if (!isValidElementType(SI->getValueOperand()->getType()))
11601 continue;
11602 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
11603 }
11604
11605 // Ignore getelementptr instructions that have more than one index, a
11606 // constant index, or a pointer operand that doesn't point to a scalar
11607 // type.
11608 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
11609 auto Idx = GEP->idx_begin()->get();
11610 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
11611 continue;
11612 if (!isValidElementType(Idx->getType()))
11613 continue;
11614 if (GEP->getType()->isVectorTy())
11615 continue;
11616 GEPs[GEP->getPointerOperand()].push_back(GEP);
11617 }
11618 }
11619}
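
// Illustrative sketch (simplified, hypothetical types; not part of
// SLPVectorizer.cpp): seed collection is one pass over the block that buckets
// candidate stores by the underlying object of their pointer operand, so the
// chain search later only compares stores that could possibly be consecutive:

#include <map>
#include <vector>

struct SeedStore {
  int UnderlyingObject; // stand-in for getUnderlyingObject(PointerOperand)
};

inline std::map<int, std::vector<SeedStore *>>
bucketByObject(std::vector<SeedStore> &Block) {
  std::map<int, std::vector<SeedStore *>> Buckets;
  for (SeedStore &S : Block)
    Buckets[S.UnderlyingObject].push_back(&S); // like Stores[Object].push_back(SI)
  return Buckets;
}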
11620
11621bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
11622 if (!A || !B)
11623 return false;
11624 if (isa<InsertElementInst>(A) || isa<InsertElementInst>(B))
11625 return false;
11626 Value *VL[] = {A, B};
11627 return tryToVectorizeList(VL, R);
11628}
11629
11630bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
11631 bool LimitForRegisterSize) {
11632 if (VL.size() < 2)
11633 return false;
11634
11635 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
11636 << VL.size() << ".\n");
11637
11638 // Check that all of the parts are instructions of the same type;
11639 // we permit an alternate opcode via InstructionsState.
11640 InstructionsState S = getSameOpcode(VL, *TLI);
11641 if (!S.getOpcode())
11642 return false;
11643
11644 Instruction *I0 = cast<Instruction>(S.OpValue);
11645 // Make sure invalid types (including vector types) are rejected before
11646 // determining the vectorization factor for scalar instructions.
11647 for (Value *V : VL) {
11648 Type *Ty = V->getType();
11649 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
11650 // NOTE: the following will give the user an internal LLVM type name,
11651 // which may not be useful.
11652 R.getORE()->emit([&]() {
11653 std::string type_str;
11654 llvm::raw_string_ostream rso(type_str);
11655 Ty->print(rso);
11656 return OptimizationRemarkMissed(SV_NAME"slp-vectorizer", "UnsupportedType", I0)
11657 << "Cannot SLP vectorize list: type "
11658 << rso.str() + " is unsupported by vectorizer";
11659 });
11660 return false;
11661 }
11662 }
11663
11664 unsigned Sz = R.getVectorElementSize(I0);
11665 unsigned MinVF = R.getMinVF(Sz);
11666 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
11667 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
11668 if (MaxVF < 2) {
11669 R.getORE()->emit([&]() {
11670 return OptimizationRemarkMissed(SV_NAME"slp-vectorizer", "SmallVF", I0)
11671 << "Cannot SLP vectorize list: vectorization factor "
11672 << "less than 2 is not supported";
11673 });
11674 return false;
11675 }
11676
11677 bool Changed = false;
11678 bool CandidateFound = false;
11679 InstructionCost MinCost = SLPCostThreshold.getValue();
11680 Type *ScalarTy = VL[0]->getType();
11681 if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
11682 ScalarTy = IE->getOperand(1)->getType();
11683
11684 unsigned NextInst = 0, MaxInst = VL.size();
11685 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
11686 // No actual vectorization should happen if the number of parts is the
11687 // same as the provided vectorization factor (i.e. the scalar type is
11688 // used for vector code during codegen).
11689 auto *VecTy = FixedVectorType::get(ScalarTy, VF);
11690 if (TTI->getNumberOfParts(VecTy) == VF)
11691 continue;
11692 for (unsigned I = NextInst; I < MaxInst; ++I) {
11693 unsigned OpsWidth = 0;
11694
11695 if (I + VF > MaxInst)
11696 OpsWidth = MaxInst - I;
11697 else
11698 OpsWidth = VF;
11699
11700 if (!isPowerOf2_32(OpsWidth))
11701 continue;
11702
11703 if ((LimitForRegisterSize && OpsWidth < MaxVF) ||
11704 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2))
11705 break;
11706
11707 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
11708 // Check that a previous iteration of this loop did not delete the Value.
11709 if (llvm::any_of(Ops, [&R](Value *V) {
11710 auto *I = dyn_cast<Instruction>(V);
11711 return I && R.isDeleted(I);
11712 }))
11713 continue;
11714
11715       LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
11716                         << "\n");
11717
11718 R.buildTree(Ops);
11719 if (R.isTreeTinyAndNotFullyVectorizable())
11720 continue;
11721 R.reorderTopToBottom();
11722 R.reorderBottomToTop(
11723 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) &&
11724 !R.doesRootHaveInTreeUses());
11725 R.buildExternalUses();
11726
11727 R.computeMinimumValueSizes();
11728 InstructionCost Cost = R.getTreeCost();
11729 CandidateFound = true;
11730 MinCost = std::min(MinCost, Cost);
11731
11732       LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost
11733                         << " for VF=" << OpsWidth << "\n");
11734 if (Cost < -SLPCostThreshold) {
11735         LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
11736 R.getORE()->emit(OptimizationRemark(SV_NAME"slp-vectorizer", "VectorizedList",
11737 cast<Instruction>(Ops[0]))
11738 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
11739 << " and with tree size "
11740 << ore::NV("TreeSize", R.getTreeSize()));
11741
11742 R.vectorizeTree();
11743 // Move to the next bundle.
11744 I += VF - 1;
11745 NextInst = I + 1;
11746 Changed = true;
11747 }
11748 }
11749 }
11750
11751 if (!Changed && CandidateFound) {
11752 R.getORE()->emit([&]() {
11753 return OptimizationRemarkMissed(SV_NAME"slp-vectorizer", "NotBeneficial", I0)
11754 << "List vectorization was possible but not beneficial with cost "
11755 << ore::NV("Cost", MinCost) << " >= "
11756 << ore::NV("Treshold", -SLPCostThreshold);
11757 });
11758 } else if (!Changed) {
11759 R.getORE()->emit([&]() {
11760 return OptimizationRemarkMissed(SV_NAME"slp-vectorizer", "NotPossible", I0)
11761 << "Cannot SLP vectorize list: vectorization was impossible"
11762 << " with available vectorization factors";
11763 });
11764 }
11765 return Changed;
11766}
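Note (worked example with hypothetical sizes, assuming LimitForRegisterSize is false and R.getMaximumVF does not lower the bound): for VL.size() == 6 and MinVF == 2, MaxVF = PowerOf2Floor(6) = 4. At VF = 4 the slice [0,4) is analyzed; the remaining 2-wide tail is abandoned because OpsWidth <= VF/2. VF then halves to 2 and the leftover pair starting at NextInst is tried, so a profitable 4-wide bundle and a trailing pair can both be vectorized in one call.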
11767
11768bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
11769 if (!I)
11770 return false;
11771
11772 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType()))
11773 return false;
11774
11775 Value *P = I->getParent();
11776
11777 // Vectorize in current basic block only.
11778 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
11779 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
11780 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
11781 return false;
11782
11783 // First collect all possible candidates
11784 SmallVector<std::pair<Value *, Value *>, 4> Candidates;
11785 Candidates.emplace_back(Op0, Op1);
11786
11787 auto *A = dyn_cast<BinaryOperator>(Op0);
11788 auto *B = dyn_cast<BinaryOperator>(Op1);
11789 // Try to skip B.
11790 if (A && B && B->hasOneUse()) {
11791 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
11792 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
11793 if (B0 && B0->getParent() == P)
11794 Candidates.emplace_back(A, B0);
11795 if (B1 && B1->getParent() == P)
11796 Candidates.emplace_back(A, B1);
11797 }
11798 // Try to skip A.
11799 if (B && A && A->hasOneUse()) {
11800 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
11801 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
11802 if (A0 && A0->getParent() == P)
11803 Candidates.emplace_back(A0, B);
11804 if (A1 && A1->getParent() == P)
11805 Candidates.emplace_back(A1, B);
11806 }
11807
11808 if (Candidates.size() == 1)
11809 return tryToVectorizePair(Op0, Op1, R);
11810
11811 // We have multiple options. Try to pick the single best.
11812 std::optional<int> BestCandidate = R.findBestRootPair(Candidates);
11813 if (!BestCandidate)
11814 return false;
11815 return tryToVectorizePair(Candidates[*BestCandidate].first,
11816 Candidates[*BestCandidate].second, R);
11817}
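Note (illustration with hypothetical values): for I = A + B where A = x*y and B = c*d, with B single-use and its operands c = e*f and d = g*h binary operators in the same block, Candidates holds (A,B), (A,c) and (A,d); findBestRootPair then picks the pairing whose operands look most alike, and only that pair is handed to tryToVectorizePair.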
11818
11819namespace {
11820
11821/// Model horizontal reductions.
11822///
11823/// A horizontal reduction is a tree of reduction instructions that has values
11824/// that can be put into a vector as its leaves. For example:
11825///
11826/// mul mul mul mul
11827/// \ / \ /
11828/// + +
11829/// \ /
11830/// +
11831/// This tree has "mul" as its leaf values and "+" as its reduction
11832/// instructions. A reduction can feed into a store or a binary operation
11833/// feeding a phi.
11834/// ...
11835/// \ /
11836/// +
11837/// |
11838/// phi +=
11839///
11840/// Or:
11841/// ...
11842/// \ /
11843/// +
11844/// |
11845/// *p =
11846///
11847class HorizontalReduction {
11848 using ReductionOpsType = SmallVector<Value *, 16>;
11849 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
11850 ReductionOpsListType ReductionOps;
11851 /// List of possibly reduced values.
11852 SmallVector<SmallVector<Value *>> ReducedVals;
11853 /// Maps reduced value to the corresponding reduction operation.
11854 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps;
11855 // Use map vector to make stable output.
11856 MapVector<Instruction *, Value *> ExtraArgs;
11857 WeakTrackingVH ReductionRoot;
11858 /// The type of reduction operation.
11859 RecurKind RdxKind;
11860
11861 static bool isCmpSelMinMax(Instruction *I) {
11862 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
11863 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
11864 }
11865
11866 // And/or are potentially poison-safe logical patterns like:
11867 // select x, y, false
11868 // select x, true, y
11869 static bool isBoolLogicOp(Instruction *I) {
11870 return isa<SelectInst>(I) &&
11871 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr()));
11872 }
11873
11874 /// Checks if instruction is associative and can be vectorized.
11875 static bool isVectorizable(RecurKind Kind, Instruction *I) {
11876 if (Kind == RecurKind::None)
11877 return false;
11878
11879 // Integer ops that map to select instructions or intrinsics are fine.
11880 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
11881 isBoolLogicOp(I))
11882 return true;
11883
11884 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
11885 // FP min/max are associative except for NaN and -0.0. We do not
11886 // have to rule out -0.0 here because the intrinsic semantics do not
11887 // specify a fixed result for it.
11888 return I->getFastMathFlags().noNaNs();
11889 }
11890
11891 return I->isAssociative();
11892 }
11893
11894 static Value *getRdxOperand(Instruction *I, unsigned Index) {
11895 // Poison-safe 'or' takes the form: select X, true, Y
11896 // To make that work with the normal operand processing, we skip the
11897 // true value operand.
11898 // TODO: Change the code and data structures to handle this without a hack.
11899 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
11900 return I->getOperand(2);
11901 return I->getOperand(Index);
11902 }
11903
11904 /// Creates reduction operation with the current opcode.
11905 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
11906 Value *RHS, const Twine &Name, bool UseSelect) {
11907 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
11908 switch (Kind) {
11909 case RecurKind::Or:
11910 if (UseSelect &&
11911 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
11912 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name);
11913 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
11914 Name);
11915 case RecurKind::And:
11916 if (UseSelect &&
11917 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
11918 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name);
11919 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
11920 Name);
11921 case RecurKind::Add:
11922 case RecurKind::Mul:
11923 case RecurKind::Xor:
11924 case RecurKind::FAdd:
11925 case RecurKind::FMul:
11926 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
11927 Name);
11928 case RecurKind::FMax:
11929 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
11930 case RecurKind::FMin:
11931 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
11932 case RecurKind::SMax:
11933 if (UseSelect) {
11934 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
11935 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
11936 }
11937 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
11938 case RecurKind::SMin:
11939 if (UseSelect) {
11940 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
11941 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
11942 }
11943 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
11944 case RecurKind::UMax:
11945 if (UseSelect) {
11946 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
11947 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
11948 }
11949 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
11950 case RecurKind::UMin:
11951 if (UseSelect) {
11952 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
11953 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
11954 }
11955 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
11956 default:
11957       llvm_unreachable("Unknown reduction operation.");
11958 }
11959 }
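Note (a hedged usage sketch of the public IRBuilder API; the variable names are hypothetical): for i1 operands with UseSelect set, RecurKind::And is emitted in the poison-safe select form rather than as a plain 'and':

  // Hypothetical sketch: equivalent of a poison-safe 'and i1 %lhs, %rhs'.
  IRBuilder<> B(InsertPt); // InsertPt: some Instruction *
  Value *Safe = B.CreateSelect(Lhs, Rhs, B.getFalse(), "and.safe");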
11960
11961 /// Creates reduction operation with the current opcode with the IR flags
11962 /// from \p ReductionOps, dropping nuw/nsw flags.
11963 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
11964 Value *RHS, const Twine &Name,
11965 const ReductionOpsListType &ReductionOps) {
11966 bool UseSelect = ReductionOps.size() == 2 ||
11967 // Logical or/and.
11968 (ReductionOps.size() == 1 &&
11969 isa<SelectInst>(ReductionOps.front().front()));
11970     assert((!UseSelect || ReductionOps.size() != 2 ||
11971             isa<SelectInst>(ReductionOps[1][0])) &&
11972            "Expected cmp + select pairs for reduction");
11973 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
11974 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
11975 if (auto *Sel = dyn_cast<SelectInst>(Op)) {
11976 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr,
11977 /*IncludeWrapFlags=*/false);
11978 propagateIRFlags(Op, ReductionOps[1], nullptr,
11979 /*IncludeWrapFlags=*/false);
11980 return Op;
11981 }
11982 }
11983 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false);
11984 return Op;
11985 }
11986
11987 static RecurKind getRdxKind(Value *V) {
11988 auto *I = dyn_cast<Instruction>(V);
11989 if (!I)
11990 return RecurKind::None;
11991 if (match(I, m_Add(m_Value(), m_Value())))
11992 return RecurKind::Add;
11993 if (match(I, m_Mul(m_Value(), m_Value())))
11994 return RecurKind::Mul;
11995 if (match(I, m_And(m_Value(), m_Value())) ||
11996 match(I, m_LogicalAnd(m_Value(), m_Value())))
11997 return RecurKind::And;
11998 if (match(I, m_Or(m_Value(), m_Value())) ||
11999 match(I, m_LogicalOr(m_Value(), m_Value())))
12000 return RecurKind::Or;
12001 if (match(I, m_Xor(m_Value(), m_Value())))
12002 return RecurKind::Xor;
12003 if (match(I, m_FAdd(m_Value(), m_Value())))
12004 return RecurKind::FAdd;
12005 if (match(I, m_FMul(m_Value(), m_Value())))
12006 return RecurKind::FMul;
12007
12008 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
12009 return RecurKind::FMax;
12010 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
12011 return RecurKind::FMin;
12012
12013 // This matches either cmp+select or intrinsics. SLP is expected to handle
12014 // either form.
12015 // TODO: If we are canonicalizing to intrinsics, we can remove several
12016 // special-case paths that deal with selects.
12017 if (match(I, m_SMax(m_Value(), m_Value())))
12018 return RecurKind::SMax;
12019 if (match(I, m_SMin(m_Value(), m_Value())))
12020 return RecurKind::SMin;
12021 if (match(I, m_UMax(m_Value(), m_Value())))
12022 return RecurKind::UMax;
12023 if (match(I, m_UMin(m_Value(), m_Value())))
12024 return RecurKind::UMin;
12025
12026 if (auto *Select = dyn_cast<SelectInst>(I)) {
12027 // Try harder: look for min/max pattern based on instructions producing
12028 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
12029 // During the intermediate stages of SLP, it's very common to have
12030 // a pattern like this (since optimizeGatherSequence is run only once
12031 // at the end):
12032 // %1 = extractelement <2 x i32> %a, i32 0
12033 // %2 = extractelement <2 x i32> %a, i32 1
12034 // %cond = icmp sgt i32 %1, %2
12035 // %3 = extractelement <2 x i32> %a, i32 0
12036 // %4 = extractelement <2 x i32> %a, i32 1
12037 // %select = select i1 %cond, i32 %3, i32 %4
12038 CmpInst::Predicate Pred;
12039 Instruction *L1;
12040 Instruction *L2;
12041
12042 Value *LHS = Select->getTrueValue();
12043 Value *RHS = Select->getFalseValue();
12044 Value *Cond = Select->getCondition();
12045
12046 // TODO: Support inverse predicates.
12047 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
12048 if (!isa<ExtractElementInst>(RHS) ||
12049 !L2->isIdenticalTo(cast<Instruction>(RHS)))
12050 return RecurKind::None;
12051 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
12052 if (!isa<ExtractElementInst>(LHS) ||
12053 !L1->isIdenticalTo(cast<Instruction>(LHS)))
12054 return RecurKind::None;
12055 } else {
12056 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
12057 return RecurKind::None;
12058 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
12059 !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
12060 !L2->isIdenticalTo(cast<Instruction>(RHS)))
12061 return RecurKind::None;
12062 }
12063
12064 switch (Pred) {
12065 default:
12066 return RecurKind::None;
12067 case CmpInst::ICMP_SGT:
12068 case CmpInst::ICMP_SGE:
12069 return RecurKind::SMax;
12070 case CmpInst::ICMP_SLT:
12071 case CmpInst::ICMP_SLE:
12072 return RecurKind::SMin;
12073 case CmpInst::ICMP_UGT:
12074 case CmpInst::ICMP_UGE:
12075 return RecurKind::UMax;
12076 case CmpInst::ICMP_ULT:
12077 case CmpInst::ICMP_ULE:
12078 return RecurKind::UMin;
12079 }
12080 }
12081 return RecurKind::None;
12082 }
12083
12084 /// Get the index of the first operand.
12085 static unsigned getFirstOperandIndex(Instruction *I) {
12086 return isCmpSelMinMax(I) ? 1 : 0;
12087 }
12088
12089 /// Total number of operands in the reduction operation.
12090 static unsigned getNumberOfOperands(Instruction *I) {
12091 return isCmpSelMinMax(I) ? 3 : 2;
12092 }
12093
12094 /// Checks if the instruction is in basic block \p BB.
12095 /// For a cmp+sel min/max reduction check that both ops are in \p BB.
12096 static bool hasSameParent(Instruction *I, BasicBlock *BB) {
12097 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) {
12098 auto *Sel = cast<SelectInst>(I);
12099 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition());
12100 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB;
12101 }
12102 return I->getParent() == BB;
12103 }
12104
12105 /// Checks if \p I has the expected number of uses for a reduction operation/reduced value.
12106 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
12107 if (IsCmpSelMinMax) {
12108 // SelectInst must be used twice while the condition op must have single
12109 // use only.
12110 if (auto *Sel = dyn_cast<SelectInst>(I))
12111 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
12112 return I->hasNUses(2);
12113 }
12114
12115 // Arithmetic reduction operation must be used once only.
12116 return I->hasOneUse();
12117 }
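Note (worked example, hypothetical chain): in a cmp+select min/max chain such as m2 = select(icmp sgt(m1, v), m1, v), the inner select m1 feeds both the icmp and the outer select, so it must have exactly two uses, while its own compare condition must have a single use; an inner arithmetic reduction op such as an add is instead required to have one use only.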
12118
12119 /// Initializes the list of reduction operations.
12120 void initReductionOps(Instruction *I) {
12121 if (isCmpSelMinMax(I))
12122 ReductionOps.assign(2, ReductionOpsType());
12123 else
12124 ReductionOps.assign(1, ReductionOpsType());
12125 }
12126
12127 /// Add all reduction operations for the reduction instruction \p I.
12128 void addReductionOps(Instruction *I) {
12129 if (isCmpSelMinMax(I)) {
12130 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
12131 ReductionOps[1].emplace_back(I);
12132 } else {
12133 ReductionOps[0].emplace_back(I);
12134 }
12135 }
12136
12137 static Value *getLHS(RecurKind Kind, Instruction *I) {
12138 if (Kind == RecurKind::None)
12139 return nullptr;
12140 return I->getOperand(getFirstOperandIndex(I));
12141 }
12142 static Value *getRHS(RecurKind Kind, Instruction *I) {
12143 if (Kind == RecurKind::None)
12144 return nullptr;
12145 return I->getOperand(getFirstOperandIndex(I) + 1);
12146 }
12147
12148 static bool isGoodForReduction(ArrayRef<Value *> Data) {
12149 int Sz = Data.size();
12150 auto *I = dyn_cast<Instruction>(Data.front());
12151 return Sz > 1 || isConstant(Data.front()) ||
12152 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode()));
12153 }
12154
12155public:
12156 HorizontalReduction() = default;
12157
12158 /// Try to find a reduction tree.
12159 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst,
12160 ScalarEvolution &SE, const DataLayout &DL,
12161 const TargetLibraryInfo &TLI) {
12162     assert((!Phi || is_contained(Phi->operands(), Inst)) &&
12163            "Phi needs to use the binary operator");
12164     assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
12165             isa<IntrinsicInst>(Inst)) &&
12166            "Expected binop, select, or intrinsic for reduction matching");
12167 RdxKind = getRdxKind(Inst);
12168
12169 // We could have an initial reduction that is not an add.
12170 // r *= v1 + v2 + v3 + v4
12171 // In such a case start looking for a tree rooted in the first '+'.
12172 if (Phi) {
12173 if (getLHS(RdxKind, Inst) == Phi) {
12174 Phi = nullptr;
12175 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
12176 if (!Inst)
12177 return false;
12178 RdxKind = getRdxKind(Inst);
12179 } else if (getRHS(RdxKind, Inst) == Phi) {
12180 Phi = nullptr;
12181 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
12182 if (!Inst)
12183 return false;
12184 RdxKind = getRdxKind(Inst);
12185 }
12186 }
12187
12188 if (!isVectorizable(RdxKind, Inst))
12189 return false;
12190
12191 // Analyze "regular" integer/FP types for reductions - no target-specific
12192 // types or pointers.
12193 Type *Ty = Inst->getType();
12194 if (!isValidElementType(Ty) || Ty->isPointerTy())
12195 return false;
12196
12197 // Though the ultimate reduction may have multiple uses, its condition must
12198 // have only single use.
12199 if (auto *Sel = dyn_cast<SelectInst>(Inst))
12200 if (!Sel->getCondition()->hasOneUse())
12201 return false;
12202
12203 ReductionRoot = Inst;
12204
12205 // Iterate through all the operands of the possible reduction tree and
12206 // gather all the reduced values, sorting them by their value id.
12207 BasicBlock *BB = Inst->getParent();
12208 bool IsCmpSelMinMax = isCmpSelMinMax(Inst);
12209 SmallVector<Instruction *> Worklist(1, Inst);
12210 // Checks if the operands of the \p TreeN instruction are also reduction
12211 // operations or should be treated as reduced values or an extra argument,
12212 // which is not part of the reduction.
12213 auto &&CheckOperands = [this, IsCmpSelMinMax,
12214 BB](Instruction *TreeN,
12215 SmallVectorImpl<Value *> &ExtraArgs,
12216 SmallVectorImpl<Value *> &PossibleReducedVals,
12217 SmallVectorImpl<Instruction *> &ReductionOps) {
12218 for (int I = getFirstOperandIndex(TreeN),
12219 End = getNumberOfOperands(TreeN);
12220 I < End; ++I) {
12221 Value *EdgeVal = getRdxOperand(TreeN, I);
12222 ReducedValsToOps[EdgeVal].push_back(TreeN);
12223 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
12224 // Edge has wrong parent - mark as an extra argument.
12225 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) &&
12226 !hasSameParent(EdgeInst, BB)) {
12227 ExtraArgs.push_back(EdgeVal);
12228 continue;
12229 }
12230 // If the edge is not an instruction, differs from the main reduction
12231 // opcode, or has too many uses, it is a possible reduced value.
12232 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind ||
12233 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
12234 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
12235 !isVectorizable(getRdxKind(EdgeInst), EdgeInst)) {
12236 PossibleReducedVals.push_back(EdgeVal);
12237 continue;
12238 }
12239 ReductionOps.push_back(EdgeInst);
12240 }
12241 };
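Note (classification sketch, hypothetical values): while walking an add reduction (a + b) + x where x is defined in another basic block, x fails hasSameParent and is recorded as an extra argument; a and b do not match RdxKind and become possible reduced values; the inner '+' matches the kind and the use checks, so it is pushed as a further reduction operation to visit.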
12242 // Try to regroup reduced values so that it gets more profitable to try to
12243 // reduce them. Values are grouped by their value ids; instructions are
12244 // grouped by their opcode and/or alternate opcode, with extra analysis for
12245 // loads (grouping them by the distance between pointers) and cmp
12246 // instructions (grouping them by the predicate).
12247 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>>
12248 PossibleReducedVals;
12249 initReductionOps(Inst);
12250 DenseMap<Value *, SmallVector<LoadInst *>> LoadsMap;
12251 SmallSet<size_t, 2> LoadKeyUsed;
12252 SmallPtrSet<Value *, 4> DoNotReverseVals;
12253 while (!Worklist.empty()) {
12254 Instruction *TreeN = Worklist.pop_back_val();
12255 SmallVector<Value *> Args;
12256 SmallVector<Value *> PossibleRedVals;
12257 SmallVector<Instruction *> PossibleReductionOps;
12258 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
12259 // If there are too many extra args, mark the instruction itself as a
12260 // reduction value, not a reduction operation.
12261 if (Args.size() < 2) {
12262 addReductionOps(TreeN);
12263 // Add extra args.
12264 if (!Args.empty()) {
12265           assert(Args.size() == 1 && "Expected only single argument.");
12266 ExtraArgs[TreeN] = Args.front();
12267 }
12268 // Add reduction values. The values are sorted for better vectorization
12269 // results.
12270 for (Value *V : PossibleRedVals) {
12271 size_t Key, Idx;
12272 std::tie(Key, Idx) = generateKeySubkey(
12273 V, &TLI,
12274 [&](size_t Key, LoadInst *LI) {
12275 Value *Ptr = getUnderlyingObject(LI->getPointerOperand());
12276 if (LoadKeyUsed.contains(Key)) {
12277 auto LIt = LoadsMap.find(Ptr);
12278 if (LIt != LoadsMap.end()) {
12279 for (LoadInst *RLI: LIt->second) {
12280 if (getPointersDiff(
12281 RLI->getType(), RLI->getPointerOperand(),
12282 LI->getType(), LI->getPointerOperand(), DL, SE,
12283 /*StrictCheck=*/true))
12284 return hash_value(RLI->getPointerOperand());
12285 }
12286 for (LoadInst *RLI : LIt->second) {
12287 if (arePointersCompatible(RLI->getPointerOperand(),
12288 LI->getPointerOperand(), TLI)) {
12289 hash_code SubKey = hash_value(RLI->getPointerOperand());
12290 DoNotReverseVals.insert(RLI);
12291 return SubKey;
12292 }
12293 }
12294 if (LIt->second.size() > 2) {
12295 hash_code SubKey =
12296 hash_value(LIt->second.back()->getPointerOperand());
12297 DoNotReverseVals.insert(LIt->second.back());
12298 return SubKey;
12299 }
12300 }
12301 }
12302 LoadKeyUsed.insert(Key);
12303 LoadsMap.try_emplace(Ptr).first->second.push_back(LI);
12304 return hash_value(LI->getPointerOperand());
12305 },
12306 /*AllowAlternate=*/false);
12307 ++PossibleReducedVals[Key][Idx]
12308 .insert(std::make_pair(V, 0))
12309 .first->second;
12310 }
12311 Worklist.append(PossibleReductionOps.rbegin(),
12312 PossibleReductionOps.rend());
12313 } else {
12314 size_t Key, Idx;
12315 std::tie(Key, Idx) = generateKeySubkey(
12316 TreeN, &TLI,
12317 [&](size_t Key, LoadInst *LI) {
12318 Value *Ptr = getUnderlyingObject(LI->getPointerOperand());
12319 if (LoadKeyUsed.contains(Key)) {
12320 auto LIt = LoadsMap.find(Ptr);
12321 if (LIt != LoadsMap.end()) {
12322 for (LoadInst *RLI: LIt->second) {
12323 if (getPointersDiff(RLI->getType(),
12324 RLI->getPointerOperand(), LI->getType(),
12325 LI->getPointerOperand(), DL, SE,
12326 /*StrictCheck=*/true))
12327 return hash_value(RLI->getPointerOperand());
12328 }
12329 for (LoadInst *RLI : LIt->second) {
12330 if (arePointersCompatible(RLI->getPointerOperand(),
12331 LI->getPointerOperand(), TLI)) {
12332 hash_code SubKey = hash_value(RLI->getPointerOperand());
12333 DoNotReverseVals.insert(RLI);
12334 return SubKey;
12335 }
12336 }
12337 if (LIt->second.size() > 2) {
12338 hash_code SubKey = hash_value(LIt->second.back()->getPointerOperand());
12339 DoNotReverseVals.insert(LIt->second.back());
12340 return SubKey;
12341 }
12342 }
12343 }
12344 LoadKeyUsed.insert(Key);
12345 LoadsMap.try_emplace(Ptr).first->second.push_back(LI);
12346 return hash_value(LI->getPointerOperand());
12347 },
12348 /*AllowAlternate=*/false);
12349 ++PossibleReducedVals[Key][Idx]
12350 .insert(std::make_pair(TreeN, 0))
12351 .first->second;
12352 }
12353 }
12354 auto PossibleReducedValsVect = PossibleReducedVals.takeVector();
12355 // Sort values by the total number of value kinds so that the reduction
12356 // starts from the longest possible sequences of reduced values.
12357 for (auto &PossibleReducedVals : PossibleReducedValsVect) {
12358 auto PossibleRedVals = PossibleReducedVals.second.takeVector();
12359 SmallVector<SmallVector<Value *>> PossibleRedValsVect;
12360 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end();
12361 It != E; ++It) {
12362 PossibleRedValsVect.emplace_back();
12363 auto RedValsVect = It->second.takeVector();
12364 stable_sort(RedValsVect, llvm::less_second());
12365 for (const std::pair<Value *, unsigned> &Data : RedValsVect)
12366 PossibleRedValsVect.back().append(Data.second, Data.first);
12367 }
12368 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) {
12369 return P1.size() > P2.size();
12370 });
12371 int NewIdx = -1;
12372 for (ArrayRef<Value *> Data : PossibleRedValsVect) {
12373 if (isGoodForReduction(Data) ||
12374 (isa<LoadInst>(Data.front()) && NewIdx >= 0 &&
12375 isa<LoadInst>(ReducedVals[NewIdx].front()) &&
12376 getUnderlyingObject(
12377 cast<LoadInst>(Data.front())->getPointerOperand()) ==
12378 getUnderlyingObject(cast<LoadInst>(ReducedVals[NewIdx].front())
12379 ->getPointerOperand()))) {
12380 if (NewIdx < 0) {
12381 NewIdx = ReducedVals.size();
12382 ReducedVals.emplace_back();
12383 }
12384 if (DoNotReverseVals.contains(Data.front()))
12385 ReducedVals[NewIdx].append(Data.begin(), Data.end());
12386 else
12387 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend());
12388 } else {
12389 ReducedVals.emplace_back().append(Data.rbegin(), Data.rend());
12390 }
12391 }
12392 }
12393 // Sort the reduced values by number of same/alternate opcode and/or pointer
12394 // operand.
12395 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) {
12396 return P1.size() > P2.size();
12397 });
12398 return true;
12399 }
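Note (grouping sketch, hypothetical values): for reduced values {load p[0], load p[1], c1, c2, load q[0]}, the two loads from p fall under one subkey because getPointersDiff can relate their pointers, the constants fall under another, and the unrelated load from q under a third; the final stable_sort then places the largest group first so the longest sequence is reduced before the stragglers.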
12400
12401 /// Attempt to vectorize the tree found by matchAssociativeReduction.
12402 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI,
12403 const TargetLibraryInfo &TLI) {
12404 constexpr int ReductionLimit = 4;
12405 constexpr unsigned RegMaxNumber = 4;
12406 constexpr unsigned RedValsMaxNumber = 128;
12407 // If there are a sufficient number of reduction values, reduce
12408 // to a nearby power-of-2. We can safely generate oversized
12409 // vectors and rely on the backend to split them to legal sizes.
12410 size_t NumReducedVals =
12411 std::accumulate(ReducedVals.begin(), ReducedVals.end(), 0,
12412 [](size_t Num, ArrayRef<Value *> Vals) {
12413 if (!isGoodForReduction(Vals))
12414 return Num;
12415 return Num + Vals.size();
12416 });
12417 if (NumReducedVals < ReductionLimit) {
12418 for (ReductionOpsType &RdxOps : ReductionOps)
12419 for (Value *RdxOp : RdxOps)
12420 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
12421 return nullptr;
12422 }
12423
12424 IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
12425
12426 // Track the reduced values in case they are replaced by extractelement
12427 // instructions because of the vectorization.
12428 DenseMap<Value *, WeakTrackingVH> TrackedVals(
12429 ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size());
12430 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
12431 ExternallyUsedValues.reserve(ExtraArgs.size() + 1);
12432 // The same extra argument may be used several times, so log each attempt
12433 // to use it.
12434 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
12435       assert(Pair.first && "DebugLoc must be set.");
12436 ExternallyUsedValues[Pair.second].push_back(Pair.first);
12437 TrackedVals.try_emplace(Pair.second, Pair.second);
12438 }
12439
12440 // The compare instruction of a min/max is the insertion point for new
12441 // instructions and may be replaced with a new compare instruction.
12442 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
12443       assert(isa<SelectInst>(RdxRootInst) &&
12444              "Expected min/max reduction to have select root instruction");
12445 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
12446       assert(isa<Instruction>(ScalarCond) &&
12447              "Expected min/max reduction to have compare condition");
12448 return cast<Instruction>(ScalarCond);
12449 };
12450
12451 // The reduction root is used as the insertion point for new instructions,
12452 // so set it as externally used to prevent it from being deleted.
12453 ExternallyUsedValues[ReductionRoot];
12454 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() *
12455 ReductionOps.front().size());
12456 for (ReductionOpsType &RdxOps : ReductionOps)
12457 for (Value *RdxOp : RdxOps) {
12458 if (!RdxOp)
12459 continue;
12460 IgnoreList.insert(RdxOp);
12461 }
12462 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
12463
12464 // Need to track the reduced values, as they may be changed during the
12465 // vectorization of subvectors.
12466 for (ArrayRef<Value *> Candidates : ReducedVals)
12467 for (Value *V : Candidates)
12468 TrackedVals.try_emplace(V, V);
12469
12470 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size());
12471 // List of the values that were reduced in other trees as part of gather
12472 // nodes and thus require an extract if fully vectorized in other trees.
12473 SmallPtrSet<Value *, 4> RequiredExtract;
12474 Value *VectorizedTree = nullptr;
12475 bool CheckForReusedReductionOps = false;
12476 // Try to vectorize elements based on their type.
12477 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
12478 ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
12479 InstructionsState S = getSameOpcode(OrigReducedVals, TLI);
12480 SmallVector<Value *> Candidates;
12481 Candidates.reserve(2 * OrigReducedVals.size());
12482 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size());
12483 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
12484 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second;
12485 // Check whether the reduction value was overridden by an extractelement
12486 // instruction because of the vectorization, and exclude it if it is
12487 // not compatible with the other values.
12488 if (auto *Inst = dyn_cast<Instruction>(RdxVal))
12489 if (isVectorLikeInstWithConstOps(Inst) &&
12490 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst)))
12491 continue;
12492 Candidates.push_back(RdxVal);
12493 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
12494 }
12495 bool ShuffledExtracts = false;
12496 // Try to handle shuffled extractelements.
12497 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
12498 I + 1 < E) {
12499 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI);
12500 if (NextS.getOpcode() == Instruction::ExtractElement &&
12501 !NextS.isAltShuffle()) {
12502 SmallVector<Value *> CommonCandidates(Candidates);
12503 for (Value *RV : ReducedVals[I + 1]) {
12504 Value *RdxVal = TrackedVals.find(RV)->second;
12505 // Check whether the reduction value was overridden by an
12506 // extractelement instruction because of the vectorization, and
12507 // exclude it if it is not compatible with the other values.
12508 if (auto *Inst = dyn_cast<Instruction>(RdxVal))
12509 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
12510 continue;
12511 CommonCandidates.push_back(RdxVal);
12512 TrackedToOrig.try_emplace(RdxVal, RV);
12513 }
12514 SmallVector<int> Mask;
12515 if (isFixedVectorShuffle(CommonCandidates, Mask)) {
12516 ++I;
12517 Candidates.swap(CommonCandidates);
12518 ShuffledExtracts = true;
12519 }
12520 }
12521 }
12522 unsigned NumReducedVals = Candidates.size();
12523 if (NumReducedVals < ReductionLimit)
12524 continue;
12525
12526 unsigned MaxVecRegSize = V.getMaxVecRegSize();
12527 unsigned EltSize = V.getVectorElementSize(Candidates[0]);
12528 unsigned MaxElts = RegMaxNumber * PowerOf2Floor(MaxVecRegSize / EltSize);
12529
12530 unsigned ReduxWidth = std::min<unsigned>(
12531 PowerOf2Floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
12532 unsigned Start = 0;
12533 unsigned Pos = Start;
12534 // Restarts the vectorization attempt with a lower vectorization factor.
12535 unsigned PrevReduxWidth = ReduxWidth;
12536 bool CheckForReusedReductionOpsLocal = false;
12537 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals,
12538 &CheckForReusedReductionOpsLocal,
12539 &PrevReduxWidth, &V,
12540 &IgnoreList](bool IgnoreVL = false) {
12541 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
12542 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
12543 // Check if any of the reduction ops are gathered. If so, it is worth
12544 // trying again with a smaller number of reduction ops.
12545 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
12546 }
12547 ++Pos;
12548 if (Pos < NumReducedVals - ReduxWidth + 1)
12549 return IsAnyRedOpGathered;
12550 Pos = Start;
12551 ReduxWidth /= 2;
12552 return IsAnyRedOpGathered;
12553 };
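Note (worked example): with NumReducedVals == 10 and an initial ReduxWidth of 8, the while loop below tries positions 0..2 (Pos < 10 - 8 + 1); once AdjustReducedVals exhausts them, Pos resets to Start and ReduxWidth halves to 4, and the search repeats until ReduxWidth drops below ReductionLimit (4).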
12554 while (Pos < NumReducedVals - ReduxWidth + 1 &&
12555 ReduxWidth >= ReductionLimit) {
12556 // Dependency in the tree of the reduction ops - drop this attempt and
12557 // try again later.
12558 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
12559 Start == 0) {
12560 CheckForReusedReductionOps = true;
12561 break;
12562 }
12563 PrevReduxWidth = ReduxWidth;
12564 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
12565 // Already being analyzed - skip.
12566 if (V.areAnalyzedReductionVals(VL)) {
12567 (void)AdjustReducedVals(/*IgnoreVL=*/true);
12568 continue;
12569 }
12570 // Early exit if any of the reduction values were deleted during
12571 // previous vectorization attempts.
12572 if (any_of(VL, [&V](Value *RedVal) {
12573 auto *RedValI = dyn_cast<Instruction>(RedVal);
12574 if (!RedValI)
12575 return false;
12576 return V.isDeleted(RedValI);
12577 }))
12578 break;
12579 V.buildTree(VL, IgnoreList);
12580 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
12581 if (!AdjustReducedVals())
12582 V.analyzedReductionVals(VL);
12583 continue;
12584 }
12585 if (V.isLoadCombineReductionCandidate(RdxKind)) {
12586 if (!AdjustReducedVals())
12587 V.analyzedReductionVals(VL);
12588 continue;
12589 }
12590 V.reorderTopToBottom();
12591 // No need to reorder the root node at all.
12592 V.reorderBottomToTop(/*IgnoreReorder=*/true);
12593 // Keep extracted other reduction values, if they are used in the
12594 // vectorization trees.
12595 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues(
12596 ExternallyUsedValues);
12597 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) {
12598 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1))
12599 continue;
12600 for_each(ReducedVals[Cnt],
12601 [&LocalExternallyUsedValues, &TrackedVals](Value *V) {
12602 if (isa<Instruction>(V))
12603 LocalExternallyUsedValues[TrackedVals[V]];
12604 });
12605 }
12606 // Number of uses of the candidates in the vector of values.
12607 SmallDenseMap<Value *, unsigned> NumUses(Candidates.size());
12608 for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) {
12609 Value *V = Candidates[Cnt];
12610 ++NumUses.try_emplace(V, 0).first->getSecond();
12611 }
12612 for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) {
12613 Value *V = Candidates[Cnt];
12614 ++NumUses.try_emplace(V, 0).first->getSecond();
12615 }
12616 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end());
12617 // Gather externally used values.
12618 SmallPtrSet<Value *, 4> Visited;
12619 for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) {
12620 Value *RdxVal = Candidates[Cnt];
12621 if (!Visited.insert(RdxVal).second)
12622 continue;
12623 // Check if the scalar was vectorized as part of the vectorization
12624 // tree but not the top node.
12625 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) {
12626 LocalExternallyUsedValues[RdxVal];
12627 continue;
12628 }
12629 unsigned NumOps = VectorizedVals.lookup(RdxVal) + NumUses[RdxVal];
12630 if (NumOps != ReducedValsToOps.find(RdxVal)->second.size())
12631 LocalExternallyUsedValues[RdxVal];
12632 }
12633 for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) {
12634 Value *RdxVal = Candidates[Cnt];
12635 if (!Visited.insert(RdxVal).second)
12636 continue;
12637 // Check if the scalar was vectorized as part of the vectorization
12638 // tree but not the top node.
12639 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) {
12640 LocalExternallyUsedValues[RdxVal];
12641 continue;
12642 }
12643 unsigned NumOps = VectorizedVals.lookup(RdxVal) + NumUses[RdxVal];
12644 if (NumOps != ReducedValsToOps.find(RdxVal)->second.size())
12645 LocalExternallyUsedValues[RdxVal];
12646 }
12647 for (Value *RdxVal : VL)
12648 if (RequiredExtract.contains(RdxVal))
12649 LocalExternallyUsedValues[RdxVal];
12650 V.buildExternalUses(LocalExternallyUsedValues);
12651
12652 V.computeMinimumValueSizes();
12653
12654 // Intersect the fast-math-flags from all reduction operations.
12655 FastMathFlags RdxFMF;
12656 RdxFMF.set();
12657 for (Value *U : IgnoreList)
12658 if (auto *FPMO = dyn_cast<FPMathOperator>(U))
12659 RdxFMF &= FPMO->getFastMathFlags();
12660 // Estimate cost.
12661 InstructionCost TreeCost = V.getTreeCost(VL);
12662 InstructionCost ReductionCost =
12663 getReductionCost(TTI, VL, ReduxWidth, RdxFMF);
12664 if (V.isVectorizedFirstNode() && isa<LoadInst>(VL.front())) {
12665 Instruction *MainOp = V.getFirstNodeMainOp();
12666 for (Value *V : VL) {
12667 auto *VI = dyn_cast<LoadInst>(V);
12668 // Add the costs of scalar GEP pointers, to be removed from the
12669 // code.
12670 if (!VI || VI == MainOp)
12671 continue;
12672 auto *Ptr = dyn_cast<GetElementPtrInst>(VI->getPointerOperand());
12673 if (!Ptr || !Ptr->hasOneUse() || Ptr->hasAllConstantIndices())
12674 continue;
12675 TreeCost -= TTI->getArithmeticInstrCost(
12676 Instruction::Add, Ptr->getType(), TTI::TCK_RecipThroughput);
12677 }
12678 }
12679 InstructionCost Cost = TreeCost + ReductionCost;
12680         LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for reduction\n");
12681 if (!Cost.isValid())
12682 return nullptr;
12683 if (Cost >= -SLPCostThreshold) {
12684 V.getORE()->emit([&]() {
12685 return OptimizationRemarkMissed(
12686 SV_NAME"slp-vectorizer", "HorSLPNotBeneficial",
12687 ReducedValsToOps.find(VL[0])->second.front())
12688 << "Vectorizing horizontal reduction is possible "
12689 << "but not beneficial with cost " << ore::NV("Cost", Cost)
12690 << " and threshold "
12691 << ore::NV("Threshold", -SLPCostThreshold);
12692 });
12693 if (!AdjustReducedVals())
12694 V.analyzedReductionVals(VL);
12695 continue;
12696 }
12697
12698         LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
12699                           << Cost << ". (HorRdx)\n");
12700 V.getORE()->emit([&]() {
12701 return OptimizationRemark(
12702 SV_NAME"slp-vectorizer", "VectorizedHorizontalReduction",
12703 ReducedValsToOps.find(VL[0])->second.front())
12704 << "Vectorized horizontal reduction with cost "
12705 << ore::NV("Cost", Cost) << " and with tree size "
12706 << ore::NV("TreeSize", V.getTreeSize());
12707 });
12708
12709 Builder.setFastMathFlags(RdxFMF);
12710
12711 // Emit a reduction. If the root is a select (min/max idiom), the insert
12712 // point is the compare condition of that select.
12713 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
12714 Instruction *InsertPt = RdxRootInst;
12715 if (IsCmpSelMinMax)
12716 InsertPt = GetCmpForMinMaxReduction(RdxRootInst);
12717
12718 // Vectorize a tree.
12719 Value *VectorizedRoot =
12720 V.vectorizeTree(LocalExternallyUsedValues, InsertPt);
12721
12722 Builder.SetInsertPoint(InsertPt);
12723
12724 // To prevent poison from leaking across what used to be sequential,
12725 // safe, scalar boolean logic operations, the reduction operand must be
12726 // frozen.
12727 if (isBoolLogicOp(RdxRootInst))
12728 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
12729
12730 Value *ReducedSubTree =
12731 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
12732
12733 if (!VectorizedTree) {
12734 // Initialize the final value in the reduction.
12735 VectorizedTree = ReducedSubTree;
12736 } else {
12737 // Update the final value in the reduction.
12738 Builder.SetCurrentDebugLocation(
12739 cast<Instruction>(ReductionOps.front().front())->getDebugLoc());
12740 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
12741 ReducedSubTree, "op.rdx", ReductionOps);
12742 }
12743 // Count vectorized reduced values to exclude them from final reduction.
12744 for (Value *RdxVal : VL) {
12745 ++VectorizedVals.try_emplace(TrackedToOrig.find(RdxVal)->second, 0)
12746 .first->getSecond();
12747 if (!V.isVectorized(RdxVal))
12748 RequiredExtract.insert(RdxVal);
12749 }
12750 Pos += ReduxWidth;
12751 Start = Pos;
12752 ReduxWidth = PowerOf2Floor(NumReducedVals - Pos);
12753 }
12754 }
12755 if (VectorizedTree) {
12756 // Reorder the operands of a bool logical op into the natural order to
12757 // avoid possible problems with poison propagation. If it is not possible
12758 // to reorder (both operands are originally RHS), emit an extra freeze
12759 // instruction for the LHS operand.
12760 // I.e., if we have original code like this:
12761 // RedOp1 = select i1 ?, i1 LHS, i1 false
12762 // RedOp2 = select i1 RHS, i1 ?, i1 false
12763
12764 // Then, we swap LHS/RHS to create a new op that matches the poison
12765 // semantics of the original code.
12766
12767 // If we have original code like this and both values could be poison:
12768 // RedOp1 = select i1 ?, i1 LHS, i1 false
12769 // RedOp2 = select i1 ?, i1 RHS, i1 false
12770
12771 // Then, we must freeze LHS in the new op.
12772 auto &&FixBoolLogicalOps =
12773 [&Builder, VectorizedTree](Value *&LHS, Value *&RHS,
12774 Instruction *RedOp1, Instruction *RedOp2) {
12775 if (!isBoolLogicOp(RedOp1))
12776 return;
12777 if (LHS == VectorizedTree || getRdxOperand(RedOp1, 0) == LHS ||
12778 isGuaranteedNotToBePoison(LHS))
12779 return;
12780 if (!isBoolLogicOp(RedOp2))
12781 return;
12782 if (RHS == VectorizedTree || getRdxOperand(RedOp2, 0) == RHS ||
12783 isGuaranteedNotToBePoison(RHS)) {
12784 std::swap(LHS, RHS);
12785 return;
12786 }
12787 LHS = Builder.CreateFreeze(LHS);
12788 };
12789 // Finish the reduction.
12790 // Need to add the extra arguments and the possible reduction values
12791 // that were not vectorized.
12792 // Try to avoid dependencies between the scalar remainders after
12793 // reductions.
12794 auto &&FinalGen =
12795 [this, &Builder, &TrackedVals, &FixBoolLogicalOps](
12796 ArrayRef<std::pair<Instruction *, Value *>> InstVals) {
12797 unsigned Sz = InstVals.size();
12798 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 +
12799 Sz % 2);
12800 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) {
12801 Instruction *RedOp = InstVals[I + 1].first;
12802 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
12803 Value *RdxVal1 = InstVals[I].second;
12804 Value *StableRdxVal1 = RdxVal1;
12805 auto It1 = TrackedVals.find(RdxVal1);
12806 if (It1 != TrackedVals.end())
12807 StableRdxVal1 = It1->second;
12808 Value *RdxVal2 = InstVals[I + 1].second;
12809 Value *StableRdxVal2 = RdxVal2;
12810 auto It2 = TrackedVals.find(RdxVal2);
12811 if (It2 != TrackedVals.end())
12812 StableRdxVal2 = It2->second;
12813 // To prevent poison from leaking across what used to be
12814 // sequential, safe, scalar boolean logic operations, the
12815 // reduction operand must be frozen.
12816 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first,
12817 RedOp);
12818 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1,
12819 StableRdxVal2, "op.rdx", ReductionOps);
12820 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed);
12821 }
12822 if (Sz % 2 == 1)
12823 ExtraReds[Sz / 2] = InstVals.back();
12824 return ExtraReds;
12825 };
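Note (worked example): given five remaining entries with values [r0, r1, r2, r3, r4], one FinalGen round emits op.rdx(r0, r1) and op.rdx(r2, r3) and carries r4 over unchanged, leaving three entries; the loop below repeats the halving until a single value remains, which becomes the final VectorizedTree.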
12826 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions;
12827 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot),
12828 VectorizedTree);
12829 SmallPtrSet<Value *, 8> Visited;
12830 for (ArrayRef<Value *> Candidates : ReducedVals) {
12831 for (Value *RdxVal : Candidates) {
12832 if (!Visited.insert(RdxVal).second)
12833 continue;
12834 unsigned NumOps = VectorizedVals.lookup(RdxVal);
12835 for (Instruction *RedOp :
12836 ArrayRef(ReducedValsToOps.find(RdxVal)->second)
12837 .drop_back(NumOps))
12838 ExtraReductions.emplace_back(RedOp, RdxVal);
12839 }
12840 }
12841 for (auto &Pair : ExternallyUsedValues) {
12842 // Add each externally used value to the final reduction.
12843 for (auto *I : Pair.second)
12844 ExtraReductions.emplace_back(I, Pair.first);
12845 }
12846 // Iterate through all non-vectorized reduction values/extra arguments.
12847 while (ExtraReductions.size() > 1) {
12848 VectorizedTree = ExtraReductions.front().second;
Value stored to 'VectorizedTree' is never read
12849 SmallVector<std::pair<Instruction *, Value *>> NewReds =
12850 FinalGen(ExtraReductions);
12851 ExtraReductions.swap(NewReds);
12852 }
12853 VectorizedTree = ExtraReductions.front().second;
12854
12855 ReductionRoot->replaceAllUsesWith(VectorizedTree);
12856
12857 // The original scalar reduction is expected to have no remaining
12858 // uses outside the reduction tree itself. Assert that we got this
12859 // correct, replace internal uses with undef, and mark for eventual
12860 // deletion.
12861#ifndef NDEBUG
12862 SmallSet<Value *, 4> IgnoreSet;
12863 for (ArrayRef<Value *> RdxOps : ReductionOps)
12864 IgnoreSet.insert(RdxOps.begin(), RdxOps.end());
12865#endif
12866 for (ArrayRef<Value *> RdxOps : ReductionOps) {
12867 for (Value *Ignore : RdxOps) {
12868 if (!Ignore)
12869 continue;
12870#ifndef NDEBUG
12871 for (auto *U : Ignore->users()) {
12872           assert(IgnoreSet.count(U) &&
12873                  "All users must be in the reduction ops list.");
12874 }
12875#endif
12876 if (!Ignore->use_empty()) {
12877 Value *Undef = UndefValue::get(Ignore->getType());
12878 Ignore->replaceAllUsesWith(Undef);
12879 }
12880 V.eraseInstruction(cast<Instruction>(Ignore));
12881 }
12882 }
12883 } else if (!CheckForReusedReductionOps) {
12884 for (ReductionOpsType &RdxOps : ReductionOps)
12885 for (Value *RdxOp : RdxOps)
12886 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
12887 }
12888 return VectorizedTree;
12889 }
12890
12891private:
12892 /// Calculate the cost of a reduction.
12893 InstructionCost getReductionCost(TargetTransformInfo *TTI,
12894 ArrayRef<Value *> ReducedVals,
12895 unsigned ReduxWidth, FastMathFlags FMF) {
12896 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
12897 Value *FirstReducedVal = ReducedVals.front();
12898 Type *ScalarTy = FirstReducedVal->getType();
12899 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
12900 InstructionCost VectorCost = 0, ScalarCost;
12901 // If all of the reduced values are constant, the vector cost is 0, since
12902 // the reduction value can be calculated at compile time.
12903 bool AllConsts = all_of(ReducedVals, isConstant);
12904 switch (RdxKind) {
12905 case RecurKind::Add:
12906 case RecurKind::Mul:
12907 case RecurKind::Or:
12908 case RecurKind::And:
12909 case RecurKind::Xor:
12910 case RecurKind::FAdd:
12911 case RecurKind::FMul: {
12912 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
12913 if (!AllConsts)
12914 VectorCost =
12915 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind);
12916 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind);
12917 break;
12918 }
12919 case RecurKind::FMax:
12920 case RecurKind::FMin: {
12921 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
12922 if (!AllConsts) {
12923 auto *VecCondTy =
12924 cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
12925 VectorCost =
12926 TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
12927 /*IsUnsigned=*/false, CostKind);
12928 }
12929 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
12930 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy,
12931 SclCondTy, RdxPred, CostKind) +
12932 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
12933 SclCondTy, RdxPred, CostKind);
12934 break;
12935 }
12936 case RecurKind::SMax:
12937 case RecurKind::SMin:
12938 case RecurKind::UMax:
12939 case RecurKind::UMin: {
12940 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
12941 if (!AllConsts) {
12942 auto *VecCondTy =
12943 cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
12944 bool IsUnsigned =
12945 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
12946 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
12947 IsUnsigned, CostKind);
12948 }
12949 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
12950 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
12951 SclCondTy, RdxPred, CostKind) +
12952 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
12953 SclCondTy, RdxPred, CostKind);
12954 break;
12955 }
12956 default:
12957 llvm_unreachable("Expected arithmetic or min/max reduction operation");
12958 }
12959
12960 // Scalar cost is repeated for N-1 elements.
12961 ScalarCost *= (ReduxWidth - 1);
12962 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
12963 << " for reduction that starts with " << *FirstReducedVal
12964 << " (It is a splitting reduction)\n");
12965 return VectorCost - ScalarCost;
12966 }
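For intuition, a worked example with hypothetical cost-model numbers (not taken from any real target):

    // Assume ReduxWidth = 4 for an i32 add reduction, the target reports a
    // vector reduction cost of 2, and one scalar add costs 1. Then:
    //   ScalarCost = 1 * (ReduxWidth - 1) = 3
    //   VectorCost = 2
    // and getReductionCost returns 2 - 3 = -1: the vector form is cheaper.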
12967
12968 /// Emit a horizontal reduction of the vectorized value.
12969 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
12970 unsigned ReduxWidth, const TargetTransformInfo *TTI) {
12971 assert(VectorizedValue && "Need to have a vectorized tree node");
12972 assert(isPowerOf2_32(ReduxWidth) &&
12973 "We only handle power-of-two reductions for now");
12974 assert(RdxKind != RecurKind::FMulAdd &&
12975 "A call to the llvm.fmuladd intrinsic is not handled yet");
12976
12977 ++NumVectorInstructions;
12978 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind);
12979 }
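As an illustration (a sketch; the exact intrinsic depends on the reduction kind, target, and LLVM version), a RecurKind::Add reduction of a <4 x i32> value typically becomes a single reduction intrinsic call:

    // %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %vec)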
12980};
12981
12982} // end anonymous namespace
12983
12984static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) {
12985 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
12986 return cast<FixedVectorType>(IE->getType())->getNumElements();
12987
12988 unsigned AggregateSize = 1;
12989 auto *IV = cast<InsertValueInst>(InsertInst);
12990 Type *CurrentType = IV->getType();
12991 do {
12992 if (auto *ST = dyn_cast<StructType>(CurrentType)) {
12993 for (auto *Elt : ST->elements())
12994 if (Elt != ST->getElementType(0)) // check homogeneity
12995 return std::nullopt;
12996 AggregateSize *= ST->getNumElements();
12997 CurrentType = ST->getElementType(0);
12998 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
12999 AggregateSize *= AT->getNumElements();
13000 CurrentType = AT->getElementType();
13001 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
13002 AggregateSize *= VT->getNumElements();
13003 return AggregateSize;
13004 } else if (CurrentType->isSingleValueType()) {
13005 return AggregateSize;
13006 } else {
13007 return std::nullopt;
13008 }
13009 } while (true);
13010}
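For example, for the homogeneous aggregate {<2 x float>, <2 x float>} (also mentioned in the findBuildAggregate comment below), the walk proceeds:

    // StructType:      homogeneous, AggregateSize = 1 * 2 = 2,
    //                  CurrentType = <2 x float>
    // FixedVectorType: AggregateSize = 2 * 2 = 4 -> return 4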
13011
13012static void findBuildAggregate_rec(Instruction *LastInsertInst,
13013 TargetTransformInfo *TTI,
13014 SmallVectorImpl<Value *> &BuildVectorOpds,
13015 SmallVectorImpl<Value *> &InsertElts,
13016 unsigned OperandOffset) {
13017 do {
13018 Value *InsertedOperand = LastInsertInst->getOperand(1);
13019 std::optional<unsigned> OperandIndex =
13020 getInsertIndex(LastInsertInst, OperandOffset);
13021 if (!OperandIndex)
13022 return;
13023 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) {
13024 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
13025 BuildVectorOpds, InsertElts, *OperandIndex);
13026
13027 } else {
13028 BuildVectorOpds[*OperandIndex] = InsertedOperand;
13029 InsertElts[*OperandIndex] = LastInsertInst;
13030 }
13031 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
13032 } while (LastInsertInst != nullptr &&
13033 isa<InsertValueInst, InsertElementInst>(LastInsertInst) &&
13034 LastInsertInst->hasOneUse());
13035}
13036
13037/// Recognize construction of vectors like
13038/// %ra = insertelement <4 x float> poison, float %s0, i32 0
13039/// %rb = insertelement <4 x float> %ra, float %s1, i32 1
13040/// %rc = insertelement <4 x float> %rb, float %s2, i32 2
13041/// %rd = insertelement <4 x float> %rc, float %s3, i32 3
13042/// starting from the last insertelement or insertvalue instruction.
13043///
13044/// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
13045/// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
13046/// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
13047///
13048/// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
13049///
13050/// \return true if it matches.
13051static bool findBuildAggregate(Instruction *LastInsertInst,
13052 TargetTransformInfo *TTI,
13053 SmallVectorImpl<Value *> &BuildVectorOpds,
13054 SmallVectorImpl<Value *> &InsertElts) {
13055
13056 assert((isa<InsertElementInst>(LastInsertInst) ||
13057 isa<InsertValueInst>(LastInsertInst)) &&
13058 "Expected insertelement or insertvalue instruction!");
13059
13060 assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
13061 "Expected empty result vectors!");
13062
13063 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
13064 if (!AggregateSize)
13065 return false;
13066 BuildVectorOpds.resize(*AggregateSize);
13067 InsertElts.resize(*AggregateSize);
13068
13069 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0);
13070 llvm::erase_value(BuildVectorOpds, nullptr);
13071 llvm::erase_value(InsertElts, nullptr);
13072 if (BuildVectorOpds.size() >= 2)
13073 return true;
13074
13075 return false;
13076}
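For the four-element insertelement chain shown in the comment above, the recursion fills both result vectors by insert index:

    // BuildVectorOpds = {%s0, %s1, %s2, %s3}
    // InsertElts      = {%ra, %rb, %rc, %rd}
    // Slots that are never written stay nullptr and are erased before the
    // final size check.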
13077
13078/// Try and get a reduction value from a phi node.
13079///
13080/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
13081/// if they come from either \p ParentBB or a containing loop latch.
13082///
13083/// \returns A candidate reduction value if possible, or \code nullptr \endcode
13084/// if not possible.
13085static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
13086 BasicBlock *ParentBB, LoopInfo *LI) {
13087 // There are situations where the reduction value is not dominated by the
13088 // reduction phi. Vectorizing such cases has been reported to cause
13089 // miscompiles. See PR25787.
13090 auto DominatedReduxValue = [&](Value *R) {
13091 return isa<Instruction>(R) &&
13092 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
13093 };
13094
13095 Value *Rdx = nullptr;
13096
13097 // Return the incoming value if it comes from the same BB as the phi node.
13098 if (P->getIncomingBlock(0) == ParentBB) {
13099 Rdx = P->getIncomingValue(0);
13100 } else if (P->getIncomingBlock(1) == ParentBB) {
13101 Rdx = P->getIncomingValue(1);
13102 }
13103
13104 if (Rdx && DominatedReduxValue(Rdx))
13105 return Rdx;
13106
13107 // Otherwise, check whether we have a loop latch to look at.
13108 Loop *BBL = LI->getLoopFor(ParentBB);
13109 if (!BBL)
13110 return nullptr;
13111 BasicBlock *BBLatch = BBL->getLoopLatch();
13112 if (!BBLatch)
13113 return nullptr;
13114
13115 // There is a loop latch; return the incoming value if it comes from
13116 // the latch. This reduction pattern occasionally turns up.
13117 if (P->getIncomingBlock(0) == BBLatch) {
13118 Rdx = P->getIncomingValue(0);
13119 } else if (P->getIncomingBlock(1) == BBLatch) {
13120 Rdx = P->getIncomingValue(1);
13121 }
13122
13123 if (Rdx && DominatedReduxValue(Rdx))
13124 return Rdx;
13125
13126 return nullptr;
13127}
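A sketch of the common case handled here (hypothetical IR): the phi's incoming value from the loop latch is the candidate reduction value.

    // loop:
    //   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
    //   ...
    //   %sum.next = add i32 %sum, %x
    // With P == %sum and ParentBB == %loop (which is also the latch),
    // getReductionValue returns %sum.next.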
13128
13129static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
13130 if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
13131 return true;
13132 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
13133 return true;
13134 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
13135 return true;
13136 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
13137 return true;
13138 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
13139 return true;
13140 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
13141 return true;
13142 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
13143 return true;
13144 return false;
13145}
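For example (a sketch), matchRdxBop succeeds on both a plain binary operator and one of the listed min/max intrinsics, binding the two operands:

    // %r0 = fadd float %a, %b                               -> V0 = %a, V1 = %b
    // %r1 = call float @llvm.maxnum.f32(float %a, float %b) -> V0 = %a, V1 = %b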
13146
13147bool SLPVectorizerPass::vectorizeHorReduction(
13148 PHINode *P, Value *V, BasicBlock *BB, BoUpSLP &R, TargetTransformInfo *TTI,
13149 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) {
13150 if (!ShouldVectorizeHor)
13151 return false;
13152
13153 auto *Root = dyn_cast_or_null<Instruction>(V);
13154 if (!Root)
13155 return false;
13156
13157 if (!isa<BinaryOperator>(Root))
13158 P = nullptr;
13159
13160 if (Root->getParent() != BB || isa<PHINode>(Root))
13161 return false;
13162 // Start analysis from the Root instruction. If a horizontal reduction is
13163 // found, try to vectorize it. If it is not a horizontal reduction or
13164 // vectorization is not possible or not effective, and the currently analyzed
13165 // instruction is a binary operation, try to vectorize its operands, using
13166 // pre-order DFS traversal order. If the operands were not vectorized, repeat
13167 // the same procedure considering each operand as a possible root of the
13168 // horizontal reduction.
13169 // Interrupt the process if the Root instruction itself was vectorized or all
13170 // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized.
13171 // If a horizontal reduction was not matched or vectorized, we collect
13172 // instructions for possible later vectorization attempts.
13173 std::queue<std::pair<Instruction *, unsigned>> Stack;
13174 Stack.emplace(Root, 0);
13175 SmallPtrSet<Value *, 8> VisitedInstrs;
13176 bool Res = false;
13177 auto &&TryToReduce = [this, TTI, &P, &R](Instruction *Inst, Value *&B0,
13178 Value *&B1) -> Value * {
13179 if (R.isAnalyzedReductionRoot(Inst))
13180 return nullptr;
13181 bool IsBinop = matchRdxBop(Inst, B0, B1);
13182 bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
13183 if (IsBinop || IsSelect) {
13184 HorizontalReduction HorRdx;
13185 if (HorRdx.matchAssociativeReduction(P, Inst, *SE, *DL, *TLI))
13186 return HorRdx.tryToReduce(R, TTI, *TLI);
13187 }
13188 return nullptr;
13189 };
13190 while (!Stack.empty()) {
13191 Instruction *Inst;
13192 unsigned Level;
13193 std::tie(Inst, Level) = Stack.front();
13194 Stack.pop();
13195 // Do not try to analyze an instruction that has already been vectorized.
13196 // This may happen when we vectorize instruction operands on a previous
13197 // iteration, while the stack was populated before that happened.
13198 if (R.isDeleted(Inst))
13199 continue;
13200 Value *B0 = nullptr, *B1 = nullptr;
13201 if (Value *V = TryToReduce(Inst, B0, B1)) {
13202 Res = true;
13203 // Set P to nullptr to avoid re-analysis of phi node in
13204 // matchAssociativeReduction function unless this is the root node.
13205 P = nullptr;
13206 if (auto *I = dyn_cast<Instruction>(V)) {
13207 // Try to find another reduction.
13208 Stack.emplace(I, Level);
13209 continue;
13210 }
13211 } else {
13212 bool IsBinop = B0 && B1;
13213 if (P && IsBinop) {
13214 Inst = dyn_cast<Instruction>(B0);
13215 if (Inst == P)
13216 Inst = dyn_cast<Instruction>(B1);
13217 if (!Inst) {
13218 // Set P to nullptr to avoid re-analysis of phi node in
13219 // matchAssociativeReduction function unless this is the root node.
13220 P = nullptr;
13221 continue;
13222 }
13223 }
13224 // Set P to nullptr to avoid re-analysis of phi node in
13225 // matchAssociativeReduction function unless this is the root node.
13226 P = nullptr;
13227 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their
13228 // analysis is done separately.
13229 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(Inst))
13230 PostponedInsts.push_back(Inst);
13231 }
13232
13233 // Try to vectorize operands.
13234 // Continue analysis only for instructions from the same basic block, to
13235 // save compile time.
13236 if (++Level < RecursionMaxDepth)
13237 for (auto *Op : Inst->operand_values())
13238 if (VisitedInstrs.insert(Op).second)
13239 if (auto *I = dyn_cast<Instruction>(Op))
13240 // Do not try to vectorize CmpInst operands, this is done
13241 // separately.
13242 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) &&
13243 !R.isDeleted(I) && I->getParent() == BB)
13244 Stack.emplace(I, Level);
13245 }
13246 return Res;
13247}
13248
13249bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
13250 BasicBlock *BB, BoUpSLP &R,
13251 TargetTransformInfo *TTI) {
13252 SmallVector<WeakTrackingVH> PostponedInsts;
13253 bool Res = vectorizeHorReduction(P, V, BB, R, TTI, PostponedInsts);
13254 Res |= tryToVectorize(PostponedInsts, R);
13255 return Res;
13256}
13257
13258bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
13259 BoUpSLP &R) {
13260 bool Res = false;
13261 for (Value *V : Insts)
13262 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst))
13263 Res |= tryToVectorize(Inst, R);
13264 return Res;
13265}
13266
13267bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
13268 BasicBlock *BB, BoUpSLP &R) {
13269 const DataLayout &DL = BB->getModule()->getDataLayout();
13270 if (!R.canMapToVector(IVI->getType(), DL))
13271 return false;
13272
13273 SmallVector<Value *, 16> BuildVectorOpds;
13274 SmallVector<Value *, 16> BuildVectorInsts;
13275 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
13276 return false;
13277
13278 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
13279 // Aggregate value is unlikely to be processed in a vector register.
13280 return tryToVectorizeList(BuildVectorOpds, R);
13281}
13282
13283bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
13284 BasicBlock *BB, BoUpSLP &R) {
13285 SmallVector<Value *, 16> BuildVectorInsts;
13286 SmallVector<Value *, 16> BuildVectorOpds;
13287 SmallVector<int> Mask;
13288 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
13289 (llvm::all_of(
13290 BuildVectorOpds,
13291 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
13292 isFixedVectorShuffle(BuildVectorOpds, Mask)))
13293 return false;
13294
13295 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
13296 return tryToVectorizeList(BuildVectorInsts, R);
13297}
13298
13299template <typename T>
13300static bool
13301tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
13302 function_ref<unsigned(T *)> Limit,
13303 function_ref<bool(T *, T *)> Comparator,
13304 function_ref<bool(T *, T *)> AreCompatible,
13305 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
13306 bool LimitForRegisterSize) {
13307 bool Changed = false;
13308 // Sort by type, parent, operands.
13309 stable_sort(Incoming, Comparator);
13310
13311 // Try to vectorize elements based on their type.
13312 SmallVector<T *> Candidates;
13313 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
13314 // Look for the next elements with the same type, parent and operand
13315 // kinds.
13316 auto *SameTypeIt = IncIt;
13317 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
13318 ++SameTypeIt;
13319
13320 // Try to vectorize them.
13321 unsigned NumElts = (SameTypeIt - IncIt);
13322 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
13323 << NumElts << ")\n");
13324 // The vectorization is a 3-state attempt:
13325 // 1. Try to vectorize instructions with the same/alternate opcodes with the
13326 // size of the maximal register at first.
13327 // 2. Try to vectorize remaining instructions with the same type, if
13328 // possible. This may produce better results than vectorizing only
13329 // instructions with the same/alternate opcodes.
13330 // 3. Make a final attempt to vectorize all instructions with the
13331 // same/alternate ops only; this may result in some extra final
13332 // vectorization.
13333 if (NumElts > 1 &&
13334 TryToVectorizeHelper(ArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
13335 // Success; start over because instructions might have been changed.
13336 Changed = true;
13337 } else if (NumElts < Limit(*IncIt) &&
13338 (Candidates.empty() ||
13339 Candidates.front()->getType() == (*IncIt)->getType())) {
13340 Candidates.append(IncIt, std::next(IncIt, NumElts));
13341 }
13342 // Final attempt to vectorize instructions with the same types.
13343 if (Candidates.size() > 1 &&
13344 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
13345 if (TryToVectorizeHelper(Candidates, /*LimitForRegisterSize=*/false)) {
13346 // Success; start over because instructions might have been changed.
13347 Changed = true;
13348 } else if (LimitForRegisterSize) {
13349 // Try to vectorize using small vectors.
13350 for (auto *It = Candidates.begin(), *End = Candidates.end();
13351 It != End;) {
13352 auto *SameTypeIt = It;
13353 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
13354 ++SameTypeIt;
13355 unsigned NumElts = (SameTypeIt - It);
13356 if (NumElts > 1 &&
13357 TryToVectorizeHelper(ArrayRef(It, NumElts),
13358 /*LimitForRegisterSize=*/false))
13359 Changed = true;
13360 It = SameTypeIt;
13361 }
13362 }
13363 Candidates.clear();
13364 }
13365
13366 // Start over at the next instruction of a different type (or the end).
13367 IncIt = SameTypeIt;
13368 }
13369 return Changed;
13370}
13371
13372/// Compare two cmp instructions. If IsCompatibility is true, the function
13373/// returns true if the 2 cmps have same/swapped predicates and the most
13374/// compatible corresponding operands. If IsCompatibility is false, the
13375/// function implements a strict weak ordering between two cmp instructions,
13376/// returning true if the first instruction is "less" than the second, i.e.
13377/// its predicate is less than the predicate of the second or the operand
13378/// IDs are less than the operand IDs of the second cmp instruction.
13379template <bool IsCompatibility>
13380static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI,
13381 function_ref<bool(Instruction *)> IsDeleted) {
13382 auto *CI1 = cast<CmpInst>(V);
13383 auto *CI2 = cast<CmpInst>(V2);
13384 if (IsDeleted(CI2) || !isValidElementType(CI2->getType()))
13385 return false;
13386 if (CI1->getOperand(0)->getType()->getTypeID() <
13387 CI2->getOperand(0)->getType()->getTypeID())
13388 return !IsCompatibility;
13389 if (CI1->getOperand(0)->getType()->getTypeID() >
13390 CI2->getOperand(0)->getType()->getTypeID())
13391 return false;
13392 CmpInst::Predicate Pred1 = CI1->getPredicate();
13393 CmpInst::Predicate Pred2 = CI2->getPredicate();
13394 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
13395 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
13396 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
13397 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
13398 if (BasePred1 < BasePred2)
13399 return !IsCompatibility;
13400 if (BasePred1 > BasePred2)
13401 return false;
13402 // Compare operands.
13403 bool LEPreds = Pred1 <= Pred2;
13404 bool GEPreds = Pred1 >= Pred2;
13405 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
13406 auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1);
13407 auto *Op2 = CI2->getOperand(GEPreds ? I : E - I - 1);
13408 if (Op1->getValueID() < Op2->getValueID())
13409 return !IsCompatibility;
13410 if (Op1->getValueID() > Op2->getValueID())
13411 return false;
13412 if (auto *I1 = dyn_cast<Instruction>(Op1))
13413 if (auto *I2 = dyn_cast<Instruction>(Op2)) {
13414 if (I1->getParent() != I2->getParent())
13415 return false;
13416 InstructionsState S = getSameOpcode({I1, I2}, TLI);
13417 if (S.getOpcode())
13418 continue;
13419 return false;
13420 }
13421 }
13422 return IsCompatibility;
13423}
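An illustration with hypothetical values: for two compares with swapped predicates, the base predicates coincide and the operand loop walks one compare's operands in reverse, so the compares are treated as compatible.

    // %c1 = icmp slt i32 %a, %b   ; swapped predicate: sgt
    // %c2 = icmp sgt i32 %b, %a   ; swapped predicate: slt
    // min(pred, swapped) is identical for both, and the reversed traversal
    // pairs (%b, %b) and (%a, %a), so compareCmp<true> returns true.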
13424
13425bool SLPVectorizerPass::vectorizeSimpleInstructions(InstSetVector &Instructions,
13426 BasicBlock *BB, BoUpSLP &R,
13427 bool AtTerminator) {
13428 bool OpsChanged = false;
13429 SmallVector<Instruction *, 4> PostponedCmps;
13430 SmallVector<WeakTrackingVH> PostponedInsts;
13431 // Pass 1: try to vectorize reductions only.
13432 for (auto *I : reverse(Instructions)) {
13433 if (R.isDeleted(I))
13434 continue;
13435 if (isa<CmpInst>(I)) {
13436 PostponedCmps.push_back(I);
13437 continue;
13438 }
13439 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts);
13440 }
13441 // Pass 2: try to match and vectorize a buildvector sequence.
13442 for (auto *I : reverse(Instructions)) {
13443 if (R.isDeleted(I) || isa<CmpInst>(I))
13444 continue;
13445 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
13446 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
13447 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
13448 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
13449 }
13450 }
13451 // Now try to vectorize postponed instructions.
13452 OpsChanged |= tryToVectorize(PostponedInsts, R);
13453
13454 if (AtTerminator) {
13455 // Try to find reductions first.
13456 for (Instruction *I : PostponedCmps) {
13457 if (R.isDeleted(I))
13458 continue;
13459 for (Value *Op : I->operands())
13460 OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
13461 }
13462 // Try to vectorize operands as vector bundles.
13463 for (Instruction *I : PostponedCmps) {
13464 if (R.isDeleted(I))
13465 continue;
13466 OpsChanged |= tryToVectorize(I, R);
13467 }
13468 // Try to vectorize list of compares.
13469 // Sort by type, compare predicate, etc.
13470 auto CompareSorter = [&](Value *V, Value *V2) {
13471 return compareCmp<false>(V, V2, *TLI,
13472 [&R](Instruction *I) { return R.isDeleted(I); });
13473 };
13474
13475 auto AreCompatibleCompares = [&](Value *V1, Value *V2) {
13476 if (V1 == V2)
13477 return true;
13478 return compareCmp<true>(V1, V2, *TLI,
13479 [&R](Instruction *I) { return R.isDeleted(I); });
13480 };
13481 auto Limit = [&R](Value *V) {
13482 unsigned EltSize = R.getVectorElementSize(V);
13483 return std::max(2U, R.getMaxVecRegSize() / EltSize);
13484 };
13485
13486 SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end());
13487 OpsChanged |= tryToVectorizeSequence<Value>(
13488 Vals, Limit, CompareSorter, AreCompatibleCompares,
13489 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
13490 // Exclude possible reductions from other blocks.
13491 bool ArePossiblyReducedInOtherBlock =
13492 any_of(Candidates, [](Value *V) {
13493 return any_of(V->users(), [V](User *U) {
13494 return isa<SelectInst>(U) &&
13495 cast<SelectInst>(U)->getParent() !=
13496 cast<Instruction>(V)->getParent();
13497 });
13498 });
13499 if (ArePossiblyReducedInOtherBlock)
13500 return false;
13501 return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
13502 },
13503 /*LimitForRegisterSize=*/true);
13504 Instructions.clear();
13505 } else {
13506 Instructions.clear();
13507 // Insert in reverse order since the PostponedCmps vector was filled in
13508 // reverse order.
13509 Instructions.insert(PostponedCmps.rbegin(), PostponedCmps.rend());
13510 }
13511 return OpsChanged;
13512}
13513
13514bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
13515 bool Changed = false;
13516 SmallVector<Value *, 4> Incoming;
13517 SmallPtrSet<Value *, 16> VisitedInstrs;
13518 // Maps phi nodes to the non-phi nodes found in the use tree for each phi
13519 // node. This allows us to better identify the chains that can be
13520 // vectorized more effectively.
13521 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
13522 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
13523 assert(isValidElementType(V1->getType()) &&
13524 isValidElementType(V2->getType()) &&
13525 "Expected vectorizable types only.");
13526 // It is fine to compare type IDs here, since we expect only vectorizable
13527 // types, like ints, floats and pointers; we don't care about other types.
13528 if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
13529 return true;
13530 if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
13531 return false;
13532 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
13533 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
13534 if (Opcodes1.size() < Opcodes2.size())
13535 return true;
13536 if (Opcodes1.size() > Opcodes2.size())
13537 return false;
13538 std::optional<bool> ConstOrder;
13539 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
13540 // Undefs are compatible with any other value.
13541 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) {
13542 if (!ConstOrder)
13543 ConstOrder =
13544 !isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I]);
13545 continue;
13546 }
13547 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
13548 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
13549 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
13550 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
13551 if (!NodeI1)
13552 return NodeI2 != nullptr;
13553 if (!NodeI2)
13554 return false;
13555 assert((NodeI1 == NodeI2) ==
13556 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
13557 "Different nodes should have different DFS numbers");
13558 if (NodeI1 != NodeI2)
13559 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
13560 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
13561 if (S.getOpcode())
13562 continue;
13563 return I1->getOpcode() < I2->getOpcode();
13564 }
13565 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) {
13566 if (!ConstOrder)
13567 ConstOrder = Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID();
13568 continue;
13569 }
13570 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
13571 return true;
13572 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
13573 return false;
13574 }
13575 return ConstOrder && *ConstOrder;
13576 };
13577 auto AreCompatiblePHIs = [&PHIToOpcodes, this](Value *V1, Value *V2) {
13578 if (V1 == V2)
13579 return true;
13580 if (V1->getType() != V2->getType())
13581 return false;
13582 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
13583 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
13584 if (Opcodes1.size() != Opcodes2.size())
13585 return false;
13586 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
13587 // Undefs are compatible with any other value.
13588 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
13589 continue;
13590 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
13591 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
13592 if (I1->getParent() != I2->getParent())
13593 return false;
13594 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
13595 if (S.getOpcode())
13596 continue;
13597 return false;
13598 }
13599 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
13600 continue;
13601 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
13602 return false;
13603 }
13604 return true;
13605 };
13606 auto Limit = [&R](Value *V) {
13607 unsigned EltSize = R.getVectorElementSize(V);
13608 return std::max(2U, R.getMaxVecRegSize() / EltSize);
13609 };
13610
13611 bool HaveVectorizedPhiNodes = false;
13612 do {
13613 // Collect the incoming values from the PHIs.
13614 Incoming.clear();
13615 for (Instruction &I : *BB) {
13616 PHINode *P = dyn_cast<PHINode>(&I);
13617 if (!P)
13618 break;
13619
13620 // No need to analyze deleted, vectorized and non-vectorizable
13621 // instructions.
13622 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
13623 isValidElementType(P->getType()))
13624 Incoming.push_back(P);
13625 }
13626
13627 // Find the corresponding non-phi nodes for better matching when trying to
13628 // build the tree.
13629 for (Value *V : Incoming) {
13630 SmallVectorImpl<Value *> &Opcodes =
13631 PHIToOpcodes.try_emplace(V).first->getSecond();
13632 if (!Opcodes.empty())
13633 continue;
13634 SmallVector<Value *, 4> Nodes(1, V);
13635 SmallPtrSet<Value *, 4> Visited;
13636 while (!Nodes.empty()) {
13637 auto *PHI = cast<PHINode>(Nodes.pop_back_val());
13638 if (!Visited.insert(PHI).second)
13639 continue;
13640 for (Value *V : PHI->incoming_values()) {
13641 if (auto *PHI1 = dyn_cast<PHINode>((V))) {
13642 Nodes.push_back(PHI1);
13643 continue;
13644 }
13645 Opcodes.emplace_back(V);
13646 }
13647 }
13648 }
13649
13650 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
13651 Incoming, Limit, PHICompare, AreCompatiblePHIs,
13652 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
13653 return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
13654 },
13655 /*LimitForRegisterSize=*/true);
13656 Changed |= HaveVectorizedPhiNodes;
13657 VisitedInstrs.insert(Incoming.begin(), Incoming.end());
13658 } while (HaveVectorizedPhiNodes);
13659
13660 VisitedInstrs.clear();
13661
13662 InstSetVector PostProcessInstructions;
13663 SmallDenseSet<Instruction *, 4> KeyNodes;
13664 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
13665 // Skip instructions with a scalable type. The number of elements is
13666 // unknown at compile time for scalable types.
13667 if (isa<ScalableVectorType>(it->getType()))
13668 continue;
13669
13670 // Skip instructions marked for deletion.
13671 if (R.isDeleted(&*it))
13672 continue;
13673 // We may go through BB multiple times, so skip the ones we have already checked.
13674 if (!VisitedInstrs.insert(&*it).second) {
13675 if (it->use_empty() && KeyNodes.contains(&*it) &&
13676 vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
13677 it->isTerminator())) {
13678 // We would like to start over since some instructions are deleted
13679 // and the iterator may become invalid.
13680 Changed = true;
13681 it = BB->begin();
13682 e = BB->end();
13683 }
13684 continue;
13685 }
13686
13687 if (isa<DbgInfoIntrinsic>(it))
13688 continue;
13689
13690 // Try to vectorize reductions that use PHINodes.
13691 if (PHINode *P = dyn_cast<PHINode>(it)) {
13692 // Check that the PHI is a reduction PHI.
13693 if (P->getNumIncomingValues() == 2) {
13694 // Try to match and vectorize a horizontal reduction.
13695 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
13696 TTI)) {
13697 Changed = true;
13698 it = BB->begin();
13699 e = BB->end();
13700 continue;
13701 }
13702 }
13703 // Try to vectorize the incoming values of the PHI, to catch reductions
13704 // that feed into PHIs.
13705 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
13706 // Skip if the incoming block is the current BB for now. Also, bypass
13707 // unreachable IR for efficiency and to avoid crashing.
13708 // TODO: Collect the skipped incoming values and try to vectorize them
13709 // after processing BB.
13710 if (BB == P->getIncomingBlock(I) ||
13711 !DT->isReachableFromEntry(P->getIncomingBlock(I)))
13712 continue;
13713
13714 // Postponed instructions should not be vectorized here, delay their
13715 // vectorization.
13716 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I));
13717 PI && !PostProcessInstructions.contains(PI))
13718 Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
13719 P->getIncomingBlock(I), R, TTI);
13720 }
13721 continue;
13722 }
13723
13724 // Ran into an instruction without users, such as a terminator, a store, or a
13725 // function call with an ignored return value. Skip unused instructions
13726 // (based on the instruction type, except for CallInst and InvokeInst).
13727 if (it->use_empty() &&
13728 (it->getType()->isVoidTy() || isa<CallInst, InvokeInst>(it))) {
13729 KeyNodes.insert(&*it);
13730 bool OpsChanged = false;
13731 auto *SI = dyn_cast<StoreInst>(it);
13732 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI;
13733 if (SI) {
13734 auto I = Stores.find(getUnderlyingObject(SI->getPointerOperand()));
13735 // Try to vectorize the chain in the store, if this is the only store to the
13736 // address in the block.
13737 // TODO: This is just a temporary solution to save compile time. Need
13738 // to investigate if we can safely turn on slp-vectorize-hor-store
13739 // instead to allow lookup for reduction chains in all non-vectorized
13740 // stores (need to check side effects and compile time).
13741 TryToVectorizeRoot = (I == Stores.end() || I->second.size() == 1) &&
13742 SI->getValueOperand()->hasOneUse();
13743 }
13744 if (TryToVectorizeRoot) {
13745 for (auto *V : it->operand_values()) {
13746 // Postponed instructions should not be vectorized here, delay their
13747 // vectorization.
13748 if (auto *VI = dyn_cast<Instruction>(V);
13749 VI && !PostProcessInstructions.contains(VI))
13750 // Try to match and vectorize a horizontal reduction.
13751 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
13752 }
13753 }
13754 // Start vectorization of the post-process list of instructions from the
13755 // top-tree instructions to try to vectorize as many instructions as
13756 // possible.
13757 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
13758 it->isTerminator());
13759 if (OpsChanged) {
13760 // We would like to start over since some instructions are deleted
13761 // and the iterator may become invalid.
13762 Changed = true;
13763 it = BB->begin();
13764 e = BB->end();
13765 continue;
13766 }
13767 }
13768
13769 if (isa<CmpInst, InsertElementInst, InsertValueInst>(it))
13770 PostProcessInstructions.insert(&*it);
13771 }
13772
13773 return Changed;
13774}
13775
13776bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
13777 auto Changed = false;
13778 for (auto &Entry : GEPs) {
13779 // If the getelementptr list has fewer than two elements, there's nothing
13780 // to do.
13781 if (Entry.second.size() < 2)
13782 continue;
13783
13784 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
13785 << Entry.second.size() << ".\n");
13786
13787 // Process the GEP list in chunks suitable for the target's supported
13788 // vector size. If a vector register can't hold 1 element, we are done. We
13789 // are trying to vectorize the index computations, so the maximum number of
13790 // elements is based on the size of the index expression, rather than the
13791 // size of the GEP itself (the target's pointer size).
13792 unsigned MaxVecRegSize = R.getMaxVecRegSize();
13793 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
13794 if (MaxVecRegSize < EltSize)
13795 continue;
13796
13797 unsigned MaxElts = MaxVecRegSize / EltSize;
13798 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
13799 auto Len = std::min<unsigned>(BE - BI, MaxElts);
13800 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
13801
13802 // Initialize a set of candidate getelementptrs. Note that we use a
13803 // SetVector here to preserve program order. If the index computations
13804 // are vectorizable and begin with loads, we want to minimize the chance
13805 // of having to reorder them later.
13806 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
13807
13808 // Some of the candidates may have already been vectorized after we
13809 // initially collected them. If so, they are marked as deleted, so remove
13810 // them from the set of candidates.
13811 Candidates.remove_if(
13812 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
13813
13814 // Remove from the set of candidates all pairs of getelementptrs with
13815 // constant differences. Such getelementptrs are likely not good
13816 // candidates for vectorization in a bottom-up phase since one can be
13817 // computed from the other. We also ensure all candidate getelementptr
13818 // indices are unique.
13819 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
13820 auto *GEPI = GEPList[I];
13821 if (!Candidates.count(GEPI))
13822 continue;
13823 auto *SCEVI = SE->getSCEV(GEPList[I]);
13824 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
13825 auto *GEPJ = GEPList[J];
13826 auto *SCEVJ = SE->getSCEV(GEPList[J]);
13827 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
13828 Candidates.remove(GEPI);
13829 Candidates.remove(GEPJ);
13830 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
13831 Candidates.remove(GEPJ);
13832 }
13833 }
13834 }
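A sketch of the pruning above (hypothetical IR): two getelementptrs whose indices differ by a constant have a constant SCEV difference, so the pair is dropped.

    // %j  = add i64 %i, 1
    // %g1 = getelementptr i32, ptr %base, i64 %i
    // %g2 = getelementptr i32, ptr %base, i64 %j
    // getMinusSCEV folds the difference to the constant -4 (one i32), so
    // both %g1 and %g2 are removed from Candidates.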
13835
13836 // We break out of the above computation as soon as we know there are
13837 // fewer than two candidates remaining.
13838 if (Candidates.size() < 2)
13839 continue;
13840
13841 // Add the single, non-constant index of each candidate to the bundle. We
13842 // ensured the indices met these constraints when we originally collected
13843 // the getelementptrs.
13844 SmallVector<Value *, 16> Bundle(Candidates.size());
13845 auto BundleIndex = 0u;
13846 for (auto *V : Candidates) {
13847 auto *GEP = cast<GetElementPtrInst>(V);
13848 auto *GEPIdx = GEP->idx_begin()->get();
13849 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
13850 Bundle[BundleIndex++] = GEPIdx;
13851 }
13852
13853 // Try and vectorize the indices. We are currently only interested in
13854 // gather-like cases of the form:
13855 //
13856 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
13857 //
13858 // where the loads of "a", the loads of "b", and the subtractions can be
13859 // performed in parallel. It's likely that detecting this pattern in a
13860 // bottom-up phase will be simpler and less costly than building a
13861 // full-blown top-down phase beginning at the consecutive loads.
13862 Changed |= tryToVectorizeList(Bundle, R);
13863 }
13864 }
13865 return Changed;
13866}
13867
13868bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
13869 bool Changed = false;
13870 // Sort by type, base pointer and value operand. Value operands must be
13871 // compatible (have the same opcode, same parent), otherwise it is
13872 // definitely not profitable to try to vectorize them.
13873 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
13874 if (V->getPointerOperandType()->getTypeID() <
13875 V2->getPointerOperandType()->getTypeID())
13876 return true;
13877 if (V->getPointerOperandType()->getTypeID() >
13878 V2->getPointerOperandType()->getTypeID())
13879 return false;
13880 // UndefValues are compatible with all other values.
13881 if (isa<UndefValue>(V->getValueOperand()) ||
13882 isa<UndefValue>(V2->getValueOperand()))
13883 return false;
13884 if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
13885 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
13886 DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
13887 DT->getNode(I1->getParent());
13888 DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
13889 DT->getNode(I2->getParent());
13890 assert(NodeI1 && "Should only process reachable instructions");
13891 assert(NodeI2 && "Should only process reachable instructions");
13892 assert((NodeI1 == NodeI2) ==
13893 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
13894 "Different nodes should have different DFS numbers");
13895 if (NodeI1 != NodeI2)
13896 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
13897 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
13898 if (S.getOpcode())
13899 return false;
13900 return I1->getOpcode() < I2->getOpcode();
13901 }
13902 if (isa<Constant>(V->getValueOperand()) &&
13903 isa<Constant>(V2->getValueOperand()))
13904 return false;
13905 return V->getValueOperand()->getValueID() <
13906 V2->getValueOperand()->getValueID();
13907 };
13908
13909 auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
13910 if (V1 == V2)
13911 return true;
13912 if (V1->getPointerOperandType() != V2->getPointerOperandType())
13913 return false;
13914 // Undefs are compatible with any other value.
13915 if (isa<UndefValue>(V1->getValueOperand()) ||
13916 isa<UndefValue>(V2->getValueOperand()))
13917 return true;
13918 if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
13919 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
13920 if (I1->getParent() != I2->getParent())
13921 return false;
13922 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
13923 return S.getOpcode() > 0;
13924 }
13925 if (isa<Constant>(V1->getValueOperand()) &&
13926 isa<Constant>(V2->getValueOperand()))
13927 return true;
13928 return V1->getValueOperand()->getValueID() ==
13929 V2->getValueOperand()->getValueID();
13930 };
13931 auto Limit = [&R, this](StoreInst *SI) {
13932 unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
13933 return R.getMinVF(EltSize);
13934 };
13935
13936 // Attempt to sort and vectorize each of the store-groups.
13937 for (auto &Pair : Stores) {
13938 if (Pair.second.size() < 2)
13939 continue;
13940
13941 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
13942 << Pair.second.size() << ".\n");
13943
13944 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
13945 continue;
13946
13947 Changed |= tryToVectorizeSequence<StoreInst>(
13948 Pair.second, Limit, StoreSorter, AreCompatibleStores,
13949 [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
13950 return vectorizeStores(Candidates, R);
13951 },
13952 /*LimitForRegisterSize=*/false);
13953 }
13954 return Changed;
13955}
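For context, a sketch of the kind of rewrite the store-group vectorization aims for (hypothetical IR, assuming four consecutive addresses):

    // Before: four scalar stores to consecutive i32 slots
    //   store i32 %a, ptr %p0
    //   store i32 %b, ptr %p1
    //   store i32 %c, ptr %p2
    //   store i32 %d, ptr %p3
    // After: one vector store of the built <4 x i32> value
    //   store <4 x i32> %vec, ptr %p0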
13956
13957char SLPVectorizer::ID = 0;
13958
13959static const char lv_name[] = "SLP Vectorizer";
13960
13961INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
13962INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
13963INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
13964INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
13965INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
13966INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
13967INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
13968INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
13969INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
13970INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
13971
13972Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }