Bug Summary

File: llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Warning: line 4473, column 22
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SLPVectorizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Transforms/Vectorize -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Vectorize -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Transforms/Vectorize -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-07-102640-14763-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

1//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10// stores that can be put together into vector-stores. Next, it attempts to
11// construct a vectorizable tree using the use-def chains. If a profitable tree
12// was found, the SLP vectorizer performs vectorization on the tree.
13//
14// The pass is inspired by the work described in the paper:
15// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16//
17//===----------------------------------------------------------------------===//
18
19#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/DenseSet.h"
23#include "llvm/ADT/MapVector.h"
24#include "llvm/ADT/None.h"
25#include "llvm/ADT/Optional.h"
26#include "llvm/ADT/PostOrderIterator.h"
27#include "llvm/ADT/STLExtras.h"
28#include "llvm/ADT/SetVector.h"
29#include "llvm/ADT/SmallPtrSet.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/SmallVector.h"
32#include "llvm/ADT/Statistic.h"
33#include "llvm/ADT/iterator.h"
34#include "llvm/ADT/iterator_range.h"
35#include "llvm/Analysis/AliasAnalysis.h"
36#include "llvm/Analysis/CodeMetrics.h"
37#include "llvm/Analysis/DemandedBits.h"
38#include "llvm/Analysis/GlobalsModRef.h"
39#include "llvm/Analysis/LoopAccessAnalysis.h"
40#include "llvm/Analysis/LoopInfo.h"
41#include "llvm/Analysis/MemoryLocation.h"
42#include "llvm/Analysis/OptimizationRemarkEmitter.h"
43#include "llvm/Analysis/ScalarEvolution.h"
44#include "llvm/Analysis/ScalarEvolutionExpressions.h"
45#include "llvm/Analysis/TargetLibraryInfo.h"
46#include "llvm/Analysis/TargetTransformInfo.h"
47#include "llvm/Analysis/ValueTracking.h"
48#include "llvm/Analysis/VectorUtils.h"
49#include "llvm/IR/Attributes.h"
50#include "llvm/IR/BasicBlock.h"
51#include "llvm/IR/Constant.h"
52#include "llvm/IR/Constants.h"
53#include "llvm/IR/DataLayout.h"
54#include "llvm/IR/DebugLoc.h"
55#include "llvm/IR/DerivedTypes.h"
56#include "llvm/IR/Dominators.h"
57#include "llvm/IR/Function.h"
58#include "llvm/IR/IRBuilder.h"
59#include "llvm/IR/InstrTypes.h"
60#include "llvm/IR/Instruction.h"
61#include "llvm/IR/Instructions.h"
62#include "llvm/IR/IntrinsicInst.h"
63#include "llvm/IR/Intrinsics.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/NoFolder.h"
66#include "llvm/IR/Operator.h"
67#include "llvm/IR/PassManager.h"
68#include "llvm/IR/PatternMatch.h"
69#include "llvm/IR/Type.h"
70#include "llvm/IR/Use.h"
71#include "llvm/IR/User.h"
72#include "llvm/IR/Value.h"
73#include "llvm/IR/ValueHandle.h"
74#include "llvm/IR/Verifier.h"
75#include "llvm/Pass.h"
76#include "llvm/Support/Casting.h"
77#include "llvm/Support/CommandLine.h"
78#include "llvm/Support/Compiler.h"
79#include "llvm/Support/DOTGraphTraits.h"
80#include "llvm/Support/Debug.h"
81#include "llvm/Support/ErrorHandling.h"
82#include "llvm/Support/GraphWriter.h"
83#include "llvm/Support/KnownBits.h"
84#include "llvm/Support/MathExtras.h"
85#include "llvm/Support/raw_ostream.h"
86#include "llvm/Transforms/Utils/LoopUtils.h"
87#include "llvm/Transforms/Vectorize.h"
88#include <algorithm>
89#include <cassert>
90#include <cstdint>
91#include <iterator>
92#include <memory>
93#include <set>
94#include <string>
95#include <tuple>
96#include <utility>
97#include <vector>
98
99using namespace llvm;
100using namespace llvm::PatternMatch;
101using namespace slpvectorizer;
102
103#define SV_NAME "slp-vectorizer"
104#define DEBUG_TYPE "SLP"
105
106STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
107
108cl::opt<bool>
109 llvm::RunSLPVectorization("vectorize-slp", cl::init(false), cl::Hidden,
110 cl::desc("Run the SLP vectorization passes"));
111
112static cl::opt<int>
113 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
114 cl::desc("Only vectorize if you gain more than this "
115 "number "));
116
117static cl::opt<bool>
118ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
119 cl::desc("Attempt to vectorize horizontal reductions"));
120
121static cl::opt<bool> ShouldStartVectorizeHorAtStore(
122 "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
123 cl::desc(
124 "Attempt to vectorize horizontal reductions feeding into a store"));
125
126static cl::opt<int>
127MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
128 cl::desc("Attempt to vectorize for this register size in bits"));
129
130/// Limits the size of scheduling regions in a block.
131/// It avoids long compile times for _very_ large blocks where vector
132/// instructions are spread over a wide range.
133/// This limit is way higher than needed by real-world functions.
134static cl::opt<int>
135ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
136 cl::desc("Limit the size of the SLP scheduling region per block"));
137
138static cl::opt<int> MinVectorRegSizeOption(
139 "slp-min-reg-size", cl::init(128), cl::Hidden,
140 cl::desc("Attempt to vectorize for this register size in bits"));
141
142static cl::opt<unsigned> RecursionMaxDepth(
143 "slp-recursion-max-depth", cl::init(12), cl::Hidden,
144 cl::desc("Limit the recursion depth when building a vectorizable tree"));
145
146static cl::opt<unsigned> MinTreeSize(
147 "slp-min-tree-size", cl::init(3), cl::Hidden,
148 cl::desc("Only vectorize small trees if they are fully vectorizable"));
149
150static cl::opt<bool>
151 ViewSLPTree("view-slp-tree", cl::Hidden,
152 cl::desc("Display the SLP trees with Graphviz"));
153
154// Limit the number of alias checks. The limit is chosen so that
155// it has no negative effect on the llvm benchmarks.
156static const unsigned AliasedCheckLimit = 10;
157
158// Another limit for the alias checks: The maximum distance between load/store
159// instructions where alias checks are done.
160// This limit is useful for very large basic blocks.
161static const unsigned MaxMemDepDistance = 160;
162
163/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
164/// regions to be handled.
165static const int MinScheduleRegionSize = 16;
166
167/// Predicate for the element types that the SLP vectorizer supports.
168///
169/// The most important thing to filter here are types which are invalid in LLVM
170/// vectors. We also filter target specific types which have absolutely no
171/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
172/// avoids spending time checking the cost model and realizing that they will
173/// be inevitably scalarized.
174static bool isValidElementType(Type *Ty) {
175 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
176 !Ty->isPPC_FP128Ty();
177}
178
179/// \returns true if all of the instructions in \p VL are in the same block or
180/// false otherwise.
181static bool allSameBlock(ArrayRef<Value *> VL) {
182 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
183 if (!I0)
184 return false;
185 BasicBlock *BB = I0->getParent();
186 for (int i = 1, e = VL.size(); i < e; i++) {
187 Instruction *I = dyn_cast<Instruction>(VL[i]);
188 if (!I)
189 return false;
190
191 if (BB != I->getParent())
192 return false;
193 }
194 return true;
195}
196
197/// \returns True if all of the values in \p VL are constants (but not
198/// globals/constant expressions).
199static bool allConstant(ArrayRef<Value *> VL) {
200 // Constant expressions and globals can't be vectorized like normal integer/FP
201 // constants.
202 for (Value *i : VL)
203 if (!isa<Constant>(i) || isa<ConstantExpr>(i) || isa<GlobalValue>(i))
204 return false;
205 return true;
206}
207
208/// \returns True if all of the values in \p VL are identical.
209static bool isSplat(ArrayRef<Value *> VL) {
210 for (unsigned i = 1, e = VL.size(); i < e; ++i)
211 if (VL[i] != VL[0])
212 return false;
213 return true;
214}
215
216/// \returns True if \p I is commutative, handles CmpInst as well as Instruction.
217static bool isCommutative(Instruction *I) {
218 if (auto *IC = dyn_cast<CmpInst>(I))
219 return IC->isCommutative();
220 return I->isCommutative();
221}
222
223/// Checks if the vector of instructions can be represented as a shuffle, like:
224/// %x0 = extractelement <4 x i8> %x, i32 0
225/// %x3 = extractelement <4 x i8> %x, i32 3
226/// %y1 = extractelement <4 x i8> %y, i32 1
227/// %y2 = extractelement <4 x i8> %y, i32 2
228/// %x0x0 = mul i8 %x0, %x0
229/// %x3x3 = mul i8 %x3, %x3
230/// %y1y1 = mul i8 %y1, %y1
231/// %y2y2 = mul i8 %y2, %y2
232/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
233/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
234/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
235/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
236/// ret <4 x i8> %ins4
237/// can be transformed into:
238/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
239/// i32 6>
240/// %2 = mul <4 x i8> %1, %1
241/// ret <4 x i8> %2
242/// We convert this initially to something like:
243/// %x0 = extractelement <4 x i8> %x, i32 0
244/// %x3 = extractelement <4 x i8> %x, i32 3
245/// %y1 = extractelement <4 x i8> %y, i32 1
246/// %y2 = extractelement <4 x i8> %y, i32 2
247/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
248/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
249/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
250/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
251/// %5 = mul <4 x i8> %4, %4
252/// %6 = extractelement <4 x i8> %5, i32 0
253/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
254/// %7 = extractelement <4 x i8> %5, i32 1
255/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
256/// %8 = extractelement <4 x i8> %5, i32 2
257/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
258/// %9 = extractelement <4 x i8> %5, i32 3
259/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
260/// ret <4 x i8> %ins4
261/// InstCombiner transforms this into a shuffle and vector mul
262/// TODO: Can we split off and reuse the shuffle mask detection from
263/// TargetTransformInfo::getInstructionThroughput?
264static Optional<TargetTransformInfo::ShuffleKind>
265isShuffle(ArrayRef<Value *> VL) {
266 auto *EI0 = cast<ExtractElementInst>(VL[0]);
267 unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
268 Value *Vec1 = nullptr;
269 Value *Vec2 = nullptr;
270 enum ShuffleMode { Unknown, Select, Permute };
271 ShuffleMode CommonShuffleMode = Unknown;
272 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
273 auto *EI = cast<ExtractElementInst>(VL[I]);
274 auto *Vec = EI->getVectorOperand();
275 // All vector operands must have the same number of vector elements.
276 if (Vec->getType()->getVectorNumElements() != Size)
277 return None;
278 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
279 if (!Idx)
280 return None;
281 // Undefined behavior if Idx is negative or >= Size.
282 if (Idx->getValue().uge(Size))
283 continue;
284 unsigned IntIdx = Idx->getValue().getZExtValue();
285 // We can extractelement from undef vector.
286 if (isa<UndefValue>(Vec))
287 continue;
288 // For correct shuffling we have to have at most 2 different vector operands
289 // in all extractelement instructions.
290 if (!Vec1 || Vec1 == Vec)
291 Vec1 = Vec;
292 else if (!Vec2 || Vec2 == Vec)
293 Vec2 = Vec;
294 else
295 return None;
296 if (CommonShuffleMode == Permute)
297 continue;
298 // If the extract index is not the same as the operation number, it is a
299 // permutation.
300 if (IntIdx != I) {
301 CommonShuffleMode = Permute;
302 continue;
303 }
304 CommonShuffleMode = Select;
305 }
306 // If we're not crossing lanes in different vectors, consider it as blending.
307 if (CommonShuffleMode == Select && Vec2)
308 return TargetTransformInfo::SK_Select;
309 // If Vec2 was never used, we have a permutation of a single vector, otherwise
310 // we have permutation of 2 vectors.
311 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
312 : TargetTransformInfo::SK_PermuteSingleSrc;
313}
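
As an aside, the Select/Permute decision above can be illustrated with a small standalone sketch. This is hypothetical code, not part of SLPVectorizer.cpp; it applies the same classification to plain (source-vector id, extract index) pairs and ignores the undef and out-of-range cases the real routine handles.

// Hypothetical illustration of the isShuffle() classification, using plain
// (source-vector id, extract index) pairs instead of extractelement IR.
#include <cstdio>
#include <utility>
#include <vector>

enum class Kind { Select, PermuteSingleSrc, PermuteTwoSrc, NotAShuffle };

Kind classify(const std::vector<std::pair<int, unsigned>> &Extracts) {
  int Vec1 = -1, Vec2 = -1;
  bool Permute = false;
  for (unsigned I = 0, E = Extracts.size(); I != E; ++I) {
    int Vec = Extracts[I].first;
    if (Vec1 == -1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (Vec2 == -1 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return Kind::NotAShuffle; // More than two source vectors.
    if (Extracts[I].second != I)
      Permute = true; // Extract index differs from the lane position.
  }
  if (!Permute && Vec2 != -1)
    return Kind::Select; // Lane-preserving blend of two vectors.
  return Vec2 != -1 ? Kind::PermuteTwoSrc : Kind::PermuteSingleSrc;
}

int main() {
  // Lanes taken from %x,%x,%y,%y at indices 0,3,1,2 (as in the comment above)
  // form a two-source permutation.
  std::printf("%d\n", static_cast<int>(classify({{0, 0}, {0, 3}, {1, 1}, {1, 2}})));
}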
314
315namespace {
316
317/// Main data required for vectorization of instructions.
318struct InstructionsState {
319 /// The very first instruction in the list with the main opcode.
320 Value *OpValue = nullptr;
321
322 /// The main/alternate instruction.
323 Instruction *MainOp = nullptr;
324 Instruction *AltOp = nullptr;
325
326 /// The main/alternate opcodes for the list of instructions.
327 unsigned getOpcode() const {
328 return MainOp ? MainOp->getOpcode() : 0;
329 }
330
331 unsigned getAltOpcode() const {
332 return AltOp ? AltOp->getOpcode() : 0;
333 }
334
335 /// Some of the instructions in the list have alternate opcodes.
336 bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }
337
338 bool isOpcodeOrAlt(Instruction *I) const {
339 unsigned CheckedOpcode = I->getOpcode();
340 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
341 }
342
343 InstructionsState() = delete;
344 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
345 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
346};
347
348} // end anonymous namespace
349
350/// Chooses the correct key for scheduling data. If \p Op has the same (or
351/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
352/// OpValue.
353static Value *isOneOf(const InstructionsState &S, Value *Op) {
354 auto *I = dyn_cast<Instruction>(Op);
355 if (I && S.isOpcodeOrAlt(I))
356 return Op;
357 return S.OpValue;
358}
359
360/// \returns analysis of the Instructions in \p VL described in
361/// InstructionsState: the opcode with which we suppose the whole list
362/// could be vectorized, even if its structure is diverse.
363static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
364 unsigned BaseIndex = 0) {
365 // Make sure these are all Instructions.
366 if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
367 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
368
369 bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
370 bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
371 unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
372 unsigned AltOpcode = Opcode;
373 unsigned AltIndex = BaseIndex;
374
375 // Check for one alternate opcode from another BinaryOperator.
376 // TODO - generalize to support all operators (types, calls etc.).
377 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
378 unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
379 if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
380 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
381 continue;
382 if (Opcode == AltOpcode) {
383 AltOpcode = InstOpcode;
384 AltIndex = Cnt;
385 continue;
386 }
387 } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
388 Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
389 Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
390 if (Ty0 == Ty1) {
391 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
392 continue;
393 if (Opcode == AltOpcode) {
394 AltOpcode = InstOpcode;
395 AltIndex = Cnt;
396 continue;
397 }
398 }
399 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
400 continue;
401 return InstructionsState(VL[BaseIndex], nullptr, nullptr);
402 }
403
404 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
405 cast<Instruction>(VL[AltIndex]));
406}
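
The main/alternate opcode selection above can be mirrored by a small hypothetical sketch that works on raw opcode numbers (not code from this file; it also skips the cast-type check):

// Hypothetical illustration of getSameOpcode(): the first opcode is the main
// one, the first differing opcode becomes the alternate, a third one aborts.
#include <cstdio>
#include <utility>
#include <vector>

std::pair<unsigned, unsigned> sameOpcode(const std::vector<unsigned> &Opcodes) {
  unsigned Opcode = Opcodes[0], AltOpcode = Opcode;
  for (unsigned InstOpcode : Opcodes) {
    if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    if (Opcode == AltOpcode) { // First deviation becomes the alternate opcode.
      AltOpcode = InstOpcode;
      continue;
    }
    return {0, 0}; // Three distinct opcodes: no common (alternate) opcode.
  }
  return {Opcode, AltOpcode};
}

int main() {
  // E.g. a list like {add, sub, add, sub}: main = add, alternate = sub
  // (13 and 15 stand in for the Add and Sub opcode numbers).
  auto R = sameOpcode({13, 15, 13, 15});
  std::printf("main=%u alt=%u\n", R.first, R.second);
}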
407
408/// \returns true if all of the values in \p VL have the same type or false
409/// otherwise.
410static bool allSameType(ArrayRef<Value *> VL) {
411 Type *Ty = VL[0]->getType();
412 for (int i = 1, e = VL.size(); i < e; i++)
413 if (VL[i]->getType() != Ty)
414 return false;
415
416 return true;
417}
418
419/// \returns True if Extract{Value,Element} instruction extracts element Idx.
420static Optional<unsigned> getExtractIndex(Instruction *E) {
421 unsigned Opcode = E->getOpcode();
422 assert((Opcode == Instruction::ExtractElement ||
423 Opcode == Instruction::ExtractValue) &&
424 "Expected extractelement or extractvalue instruction.");
425 if (Opcode == Instruction::ExtractElement) {
426 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
427 if (!CI)
428 return None;
429 return CI->getZExtValue();
430 }
431 ExtractValueInst *EI = cast<ExtractValueInst>(E);
432 if (EI->getNumIndices() != 1)
433 return None;
434 return *EI->idx_begin();
435}
436
437/// \returns True if in-tree use also needs extract. This refers to
438/// a possible scalar operand in a vectorized instruction.
439static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
440 TargetLibraryInfo *TLI) {
441 unsigned Opcode = UserInst->getOpcode();
442 switch (Opcode) {
443 case Instruction::Load: {
444 LoadInst *LI = cast<LoadInst>(UserInst);
445 return (LI->getPointerOperand() == Scalar);
446 }
447 case Instruction::Store: {
448 StoreInst *SI = cast<StoreInst>(UserInst);
449 return (SI->getPointerOperand() == Scalar);
450 }
451 case Instruction::Call: {
452 CallInst *CI = cast<CallInst>(UserInst);
453 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
454 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
455 if (hasVectorInstrinsicScalarOpd(ID, i))
456 return (CI->getArgOperand(i) == Scalar);
457 }
458 LLVM_FALLTHROUGH;
459 }
460 default:
461 return false;
462 }
463}
464
465/// \returns the AA location that is being accessed by the instruction.
466static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
467 if (StoreInst *SI = dyn_cast<StoreInst>(I))
468 return MemoryLocation::get(SI);
469 if (LoadInst *LI = dyn_cast<LoadInst>(I))
470 return MemoryLocation::get(LI);
471 return MemoryLocation();
472}
473
474/// \returns True if the instruction is not a volatile or atomic load/store.
475static bool isSimple(Instruction *I) {
476 if (LoadInst *LI = dyn_cast<LoadInst>(I))
477 return LI->isSimple();
478 if (StoreInst *SI = dyn_cast<StoreInst>(I))
479 return SI->isSimple();
480 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
481 return !MI->isVolatile();
482 return true;
483}
484
485namespace llvm {
486
487namespace slpvectorizer {
488
489/// Bottom Up SLP Vectorizer.
490class BoUpSLP {
491 struct TreeEntry;
492 struct ScheduleData;
493
494public:
495 using ValueList = SmallVector<Value *, 8>;
496 using InstrList = SmallVector<Instruction *, 16>;
497 using ValueSet = SmallPtrSet<Value *, 16>;
498 using StoreList = SmallVector<StoreInst *, 8>;
499 using ExtraValueToDebugLocsMap =
500 MapVector<Value *, SmallVector<Instruction *, 2>>;
501
502 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
503 TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
504 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
505 const DataLayout *DL, OptimizationRemarkEmitter *ORE)
506 : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
507 DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
508 CodeMetrics::collectEphemeralValues(F, AC, EphValues);
509 // Use the vector register size specified by the target unless overridden
510 // by a command-line option.
511 // TODO: It would be better to limit the vectorization factor based on
512 // data type rather than just register size. For example, x86 AVX has
513 // 256-bit registers, but it does not support integer operations
514 // at that width (that requires AVX2).
515 if (MaxVectorRegSizeOption.getNumOccurrences())
516 MaxVecRegSize = MaxVectorRegSizeOption;
517 else
518 MaxVecRegSize = TTI->getRegisterBitWidth(true);
519
520 if (MinVectorRegSizeOption.getNumOccurrences())
521 MinVecRegSize = MinVectorRegSizeOption;
522 else
523 MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
524 }
525
526 /// Vectorize the tree that starts with the elements in \p VL.
527 /// Returns the vectorized root.
528 Value *vectorizeTree();
529
530 /// Vectorize the tree but with the list of externally used values \p
531 /// ExternallyUsedValues. Values in this MapVector can be replaced by the
532 /// generated extractvalue instructions.
533 Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);
534
535 /// \returns the cost incurred by unwanted spills and fills, caused by
536 /// holding live values over call sites.
537 int getSpillCost() const;
538
539 /// \returns the vectorization cost of the subtree that starts at \p VL.
540 /// A negative number means that this is profitable.
541 int getTreeCost();
542
543 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
544 /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
545 void buildTree(ArrayRef<Value *> Roots,
546 ArrayRef<Value *> UserIgnoreLst = None);
547
548 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
549 /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
550 /// into account (anf updating it, if required) list of externally used
551 /// values stored in \p ExternallyUsedValues.
552 void buildTree(ArrayRef<Value *> Roots,
553 ExtraValueToDebugLocsMap &ExternallyUsedValues,
554 ArrayRef<Value *> UserIgnoreLst = None);
555
556 /// Clear the internal data structures that are created by 'buildTree'.
557 void deleteTree() {
558 VectorizableTree.clear();
559 ScalarToTreeEntry.clear();
560 MustGather.clear();
561 ExternalUses.clear();
562 NumOpsWantToKeepOrder.clear();
563 NumOpsWantToKeepOriginalOrder = 0;
564 for (auto &Iter : BlocksSchedules) {
565 BlockScheduling *BS = Iter.second.get();
566 BS->clear();
567 }
568 MinBWs.clear();
569 }
570
571 unsigned getTreeSize() const { return VectorizableTree.size(); }
572
573 /// Perform LICM and CSE on the newly generated gather sequences.
574 void optimizeGatherSequence();
575
576 /// \returns The best order of instructions for vectorization.
577 Optional<ArrayRef<unsigned>> bestOrder() const {
578 auto I = std::max_element(
579 NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
580 [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
581 const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
582 return D1.second < D2.second;
583 });
584 if (I == NumOpsWantToKeepOrder.end() ||
585 I->getSecond() <= NumOpsWantToKeepOriginalOrder)
586 return None;
587
588 return makeArrayRef(I->getFirst());
589 }
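
A minimal sketch of the same decision rule, assuming the candidate orders are plain index vectors counted in a std::map (hypothetical illustration, not the class's real data structures):

// Hypothetical illustration of bestOrder(): the most frequently requested
// order wins, unless keeping the original order was requested at least as often.
#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>

const std::vector<unsigned> *pickBestOrder(
    const std::map<std::vector<unsigned>, unsigned> &NumOpsWantToKeepOrder,
    unsigned NumOpsWantToKeepOriginalOrder) {
  auto I = std::max_element(
      NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
      [](const auto &D1, const auto &D2) { return D1.second < D2.second; });
  if (I == NumOpsWantToKeepOrder.end() ||
      I->second <= NumOpsWantToKeepOriginalOrder)
    return nullptr; // Plays the role of returning None.
  return &I->first;
}

int main() {
  std::map<std::vector<unsigned>, unsigned> Counts = {{{1, 0, 3, 2}, 5}};
  std::printf("%s\n", pickBestOrder(Counts, 2) ? "reorder" : "keep original");
}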
590
591 /// \return The vector element size in bits to use when vectorizing the
592 /// expression tree ending at \p V. If V is a store, the size is the width of
593 /// the stored value. Otherwise, the size is the width of the largest loaded
594 /// value reaching V. This method is used by the vectorizer to calculate
595 /// vectorization factors.
596 unsigned getVectorElementSize(Value *V) const;
597
598 /// Compute the minimum type sizes required to represent the entries in a
599 /// vectorizable tree.
600 void computeMinimumValueSizes();
601
602 // \returns maximum vector register size as set by TTI or overridden by cl::opt.
603 unsigned getMaxVecRegSize() const {
604 return MaxVecRegSize;
605 }
606
607 // \returns minimum vector register size as set by cl::opt.
608 unsigned getMinVecRegSize() const {
609 return MinVecRegSize;
610 }
611
612 /// Check if ArrayType or StructType is isomorphic to some VectorType.
613 ///
614 /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
615 unsigned canMapToVector(Type *T, const DataLayout &DL) const;
616
617 /// \returns True if the VectorizableTree is both tiny and not fully
618 /// vectorizable. We do not vectorize such trees.
619 bool isTreeTinyAndNotFullyVectorizable() const;
620
621 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
622 /// can be load combined in the backend. Load combining may not be allowed in
623 /// the IR optimizer, so we do not want to alter the pattern. For example,
624 /// partially transforming a scalar bswap() pattern into vector code is
625 /// effectively impossible for the backend to undo.
626 /// TODO: If load combining is allowed in the IR optimizer, this analysis
627 /// may not be necessary.
628 bool isLoadCombineReductionCandidate(unsigned ReductionOpcode) const;
629
630 OptimizationRemarkEmitter *getORE() { return ORE; }
631
632 /// This structure holds any data we need about the edges being traversed
633 /// during buildTree_rec(). We keep track of:
634 /// (i) the user TreeEntry index, and
635 /// (ii) the index of the edge.
636 struct EdgeInfo {
637 EdgeInfo() = default;
638 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
639 : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
640 /// The user TreeEntry.
641 TreeEntry *UserTE = nullptr;
642 /// The operand index of the use.
643 unsigned EdgeIdx = UINT_MAX;
644#ifndef NDEBUG
645 friend inline raw_ostream &operator<<(raw_ostream &OS,
646 const BoUpSLP::EdgeInfo &EI) {
647 EI.dump(OS);
648 return OS;
649 }
650 /// Debug print.
651 void dump(raw_ostream &OS) const {
652 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
653 << " EdgeIdx:" << EdgeIdx << "}";
654 }
655 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
656#endif
657 };
658
659 /// A helper data structure to hold the operands of a vector of instructions.
660 /// This supports a fixed vector length for all operand vectors.
661 class VLOperands {
662 /// For each operand we need (i) the value, and (ii) the opcode that it
663 /// would be attached to if the expression was in a left-linearized form.
664 /// This is required to avoid illegal operand reordering.
665 /// For example:
666 /// \verbatim
667 ///                        0 Op1
668 ///                        |/
669 /// Op1 Op2   Linearized   + Op2
670 ///   \ /     ---------->  |/
671 ///    -                   -
672 ///
673 /// Op1 - Op2            (0 + Op1) - Op2
674 /// \endverbatim
675 ///
676 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
677 ///
678 /// Another way to think of this is to track all the operations across the
679 /// path from the operand all the way to the root of the tree and to
680 /// calculate the operation that corresponds to this path. For example, the
681 /// path from Op2 to the root crosses the RHS of the '-', therefore the
682 /// corresponding operation is a '-' (which matches the one in the
683 /// linearized tree, as shown above).
684 ///
685 /// For lack of a better term, we refer to this operation as Accumulated
686 /// Path Operation (APO).
687 struct OperandData {
688 OperandData() = default;
689 OperandData(Value *V, bool APO, bool IsUsed)
690 : V(V), APO(APO), IsUsed(IsUsed) {}
691 /// The operand value.
692 Value *V = nullptr;
693 /// TreeEntries only allow a single opcode, or an alternate sequence of
694 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
695 /// APO. It is set to 'true' if 'V' is attached to an inverse operation
696 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
697 /// (e.g., Add/Mul)
698 bool APO = false;
699 /// Helper data for the reordering function.
700 bool IsUsed = false;
701 };
702
703 /// During operand reordering, we are trying to select the operand at lane
704 /// that matches best with the operand at the neighboring lane. Our
705 /// selection is based on the type of value we are looking for. For example,
706 /// if the neighboring lane has a load, we need to look for a load that is
707 /// accessing a consecutive address. These strategies are summarized in the
708 /// 'ReorderingMode' enumerator.
709 enum class ReorderingMode {
710 Load, ///< Matching loads to consecutive memory addresses
711 Opcode, ///< Matching instructions based on opcode (same or alternate)
712 Constant, ///< Matching constants
713 Splat, ///< Matching the same instruction multiple times (broadcast)
714 Failed, ///< We failed to create a vectorizable group
715 };
716
717 using OperandDataVec = SmallVector<OperandData, 2>;
718
719 /// A vector of operand vectors.
720 SmallVector<OperandDataVec, 4> OpsVec;
721
722 const DataLayout &DL;
723 ScalarEvolution &SE;
724
725 /// \returns the operand data at \p OpIdx and \p Lane.
726 OperandData &getData(unsigned OpIdx, unsigned Lane) {
727 return OpsVec[OpIdx][Lane];
728 }
729
730 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
731 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
732 return OpsVec[OpIdx][Lane];
733 }
734
735 /// Clears the used flag for all entries.
736 void clearUsed() {
737 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
738 OpIdx != NumOperands; ++OpIdx)
739 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
740 ++Lane)
741 OpsVec[OpIdx][Lane].IsUsed = false;
742 }
743
744 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
745 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
746 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
747 }
748
749 // Search all operands in Ops[*][Lane] for the one that matches best
750 // Ops[OpIdx][LastLane] and return its operand index.
751 // If no good match can be found, return None.
752 Optional<unsigned>
753 getBestOperand(unsigned OpIdx, int Lane, int LastLane,
754 ArrayRef<ReorderingMode> ReorderingModes) {
755 unsigned NumOperands = getNumOperands();
756
757 // The operand of the previous lane at OpIdx.
758 Value *OpLastLane = getData(OpIdx, LastLane).V;
759
760 // Our strategy mode for OpIdx.
761 ReorderingMode RMode = ReorderingModes[OpIdx];
762
763 // The linearized opcode of the operand at OpIdx, Lane.
764 bool OpIdxAPO = getData(OpIdx, Lane).APO;
765
766 const unsigned BestScore = 2;
767 const unsigned GoodScore = 1;
768
769 // The best operand index and its score.
770 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
771 // are using the score to differentiate between the two.
772 struct BestOpData {
773 Optional<unsigned> Idx = None;
774 unsigned Score = 0;
775 } BestOp;
776
777 // Iterate through all unused operands and look for the best.
778 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
779 // Get the operand at Idx and Lane.
780 OperandData &OpData = getData(Idx, Lane);
781 Value *Op = OpData.V;
782 bool OpAPO = OpData.APO;
783
784 // Skip already selected operands.
785 if (OpData.IsUsed)
786 continue;
787
788 // Skip if we are trying to move the operand to a position with a
789 // different opcode in the linearized tree form. This would break the
790 // semantics.
791 if (OpAPO != OpIdxAPO)
792 continue;
793
794 // Look for an operand that matches the current mode.
795 switch (RMode) {
796 case ReorderingMode::Load:
797 if (isa<LoadInst>(Op)) {
798 // Figure out which is left and right, so that we can check for
799 // consecutive loads
800 bool LeftToRight = Lane > LastLane;
801 Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
802 Value *OpRight = (LeftToRight) ? Op : OpLastLane;
803 if (isConsecutiveAccess(cast<LoadInst>(OpLeft),
804 cast<LoadInst>(OpRight), DL, SE))
805 BestOp.Idx = Idx;
806 }
807 break;
808 case ReorderingMode::Opcode:
809 // We accept both Instructions and Undefs, but with different scores.
810 if ((isa<Instruction>(Op) && isa<Instruction>(OpLastLane) &&
811 cast<Instruction>(Op)->getOpcode() ==
812 cast<Instruction>(OpLastLane)->getOpcode()) ||
813 (isa<UndefValue>(OpLastLane) && isa<Instruction>(Op)) ||
814 isa<UndefValue>(Op)) {
815 // An instruction has a higher score than an undef.
816 unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
817 if (Score > BestOp.Score) {
818 BestOp.Idx = Idx;
819 BestOp.Score = Score;
820 }
821 }
822 break;
823 case ReorderingMode::Constant:
824 if (isa<Constant>(Op)) {
825 unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
826 if (Score > BestOp.Score) {
827 BestOp.Idx = Idx;
828 BestOp.Score = Score;
829 }
830 }
831 break;
832 case ReorderingMode::Splat:
833 if (Op == OpLastLane)
834 BestOp.Idx = Idx;
835 break;
836 case ReorderingMode::Failed:
837 return None;
838 }
839 }
840
841 if (BestOp.Idx) {
842 getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
843 return BestOp.Idx;
844 }
845 // If we could not find a good match return None.
846 return None;
847 }
848
849 /// Helper for reorderOperandVecs. \Returns the lane that we should start
850 /// reordering from. This is the one which has the least number of operands
851 /// that can freely move about.
852 unsigned getBestLaneToStartReordering() const {
853 unsigned BestLane = 0;
854 unsigned Min = UINT_MAX;
855 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
856 ++Lane) {
857 unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
858 if (NumFreeOps < Min) {
859 Min = NumFreeOps;
860 BestLane = Lane;
861 }
862 }
863 return BestLane;
864 }
865
866 /// \Returns the maximum number of operands that are allowed to be reordered
867 /// for \p Lane. This is used as a heuristic for selecting the first lane to
868 /// start operand reordering.
869 unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
870 unsigned CntTrue = 0;
871 unsigned NumOperands = getNumOperands();
872 // Operands with the same APO can be reordered. We therefore need to count
873 // how many of them we have for each APO, like this: Cnt[APO] = x.
874 // Since we only have two APOs, namely true and false, we can avoid using
875 // a map. Instead we can simply count the number of operands that
876 // correspond to one of them (in this case the 'true' APO), and calculate
877 // the other by subtracting it from the total number of operands.
878 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
879 if (getData(OpIdx, Lane).APO)
880 ++CntTrue;
881 unsigned CntFalse = NumOperands - CntTrue;
882 return std::max(CntTrue, CntFalse);
883 }
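
The counting trick used above can be shown in isolation (a hypothetical standalone sketch): because an operand's APO is one of only two values, counting one group and subtracting gives the other, and the larger group is the number of operands that can be freely reordered within the lane.

// Hypothetical illustration of getMaxNumOperandsThatCanBeReordered():
// operands that share an APO value may be swapped among themselves.
#include <algorithm>
#include <cstdio>
#include <vector>

unsigned maxReorderable(const std::vector<bool> &APOs) {
  unsigned CntTrue = 0;
  for (bool APO : APOs)
    if (APO)
      ++CntTrue;
  unsigned CntFalse = APOs.size() - CntTrue;
  return std::max(CntTrue, CntFalse);
}

int main() {
  // A subtraction lane "a - b": the LHS has APO=false, the RHS APO=true, so
  // the largest group has a single member and nothing can be moved.
  std::printf("%u\n", maxReorderable({false, true}));
}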
884
885 /// Go through the instructions in VL and append their operands.
886 void appendOperandsOfVL(ArrayRef<Value *> VL) {
886 assert(!VL.empty() && "Bad VL");
888 assert((empty() || VL.size() == getNumLanes()) &&
889 "Expected same number of lanes");
890 assert(isa<Instruction>(VL[0]) && "Expected instruction");
891 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
892 OpsVec.resize(NumOperands);
893 unsigned NumLanes = VL.size();
894 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
895 OpsVec[OpIdx].resize(NumLanes);
896 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
897 assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
898 // Our tree has just 3 nodes: the root and two operands.
899 // It is therefore trivial to get the APO. We only need to check the
900 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
901 // RHS operand. The LHS operand of both add and sub is never attached
902 // to an inverse operation in the linearized form, therefore its APO
903 // is false. The RHS is true only if VL[Lane] is an inverse operation.
904
905 // Since operand reordering is performed on groups of commutative
906 // operations or alternating sequences (e.g., +, -), we can safely
907 // tell the inverse operations by checking commutativity.
908 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
909 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
910 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
911 APO, false};
912 }
913 }
914 }
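
For concreteness, a hedged standalone sketch of the APO rule described in the comments above, using commutativity as the proxy for an inverse operation exactly as appendOperandsOfVL() does (hypothetical code with made-up inputs):

// Hypothetical illustration of the APO assignment in appendOperandsOfVL():
// the LHS (operand 0) always gets APO=false; the RHS gets APO=true only when
// the lane's instruction is non-commutative (e.g. sub), and false otherwise.
#include <cstdio>

bool computeAPO(unsigned OpIdx, bool LaneIsCommutative) {
  bool IsInverseOperation = !LaneIsCommutative;
  return OpIdx == 0 ? false : IsInverseOperation;
}

int main() {
  // Lane "a + b": both operands get APO=false.
  // Lane "c - d": 'c' gets false, 'd' gets true.
  std::printf("add: %d %d  sub: %d %d\n",
              computeAPO(0, true), computeAPO(1, true),
              computeAPO(0, false), computeAPO(1, false));
}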
915
916 /// \returns the number of operands.
917 unsigned getNumOperands() const { return OpsVec.size(); }
918
919 /// \returns the number of lanes.
920 unsigned getNumLanes() const { return OpsVec[0].size(); }
921
922 /// \returns the operand value at \p OpIdx and \p Lane.
923 Value *getValue(unsigned OpIdx, unsigned Lane) const {
924 return getData(OpIdx, Lane).V;
925 }
926
927 /// \returns true if the data structure is empty.
928 bool empty() const { return OpsVec.empty(); }
929
930 /// Clears the data.
931 void clear() { OpsVec.clear(); }
932
933 /// \Returns true if there are enough operands identical to \p Op to fill
934 /// the whole vector.
935 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
936 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
937 bool OpAPO = getData(OpIdx, Lane).APO;
938 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
939 if (Ln == Lane)
940 continue;
941 // This is set to true if we found a candidate for broadcast at Lane.
942 bool FoundCandidate = false;
943 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
944 OperandData &Data = getData(OpI, Ln);
945 if (Data.APO != OpAPO || Data.IsUsed)
946 continue;
947 if (Data.V == Op) {
948 FoundCandidate = true;
949 Data.IsUsed = true;
950 break;
951 }
952 }
953 if (!FoundCandidate)
954 return false;
955 }
956 return true;
957 }
958
959 public:
960 /// Initialize with all the operands of the instruction vector \p RootVL.
961 VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
962 ScalarEvolution &SE)
963 : DL(DL), SE(SE) {
964 // Append all the operands of RootVL.
965 appendOperandsOfVL(RootVL);
966 }
967
968 /// \Returns a value vector with the operands across all lanes for the
969 /// operand at \p OpIdx.
970 ValueList getVL(unsigned OpIdx) const {
971 ValueList OpVL(OpsVec[OpIdx].size());
972 assert(OpsVec[OpIdx].size() == getNumLanes() &&
973 "Expected same num of lanes across all operands");
974 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
975 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
976 return OpVL;
977 }
978
979 // Performs operand reordering for 2 or more operands.
980 // The original operands are in OrigOps[OpIdx][Lane].
981 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
982 void reorder() {
983 unsigned NumOperands = getNumOperands();
984 unsigned NumLanes = getNumLanes();
985 // Each operand has its own mode. We are using this mode to help us select
986 // the instructions for each lane, so that they match best with the ones
987 // we have selected so far.
988 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
989
990 // This is a greedy single-pass algorithm. We are going over each lane
991 // once and deciding on the best order right away with no back-tracking.
992 // However, in order to increase its effectiveness, we start with the lane
993 // that has operands that can move the least. For example, given the
994 // following lanes:
995 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
996 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
997 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
998 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
999 // we will start at Lane 1, since the operands of the subtraction cannot
1000 // be reordered. Then we will visit the rest of the lanes in a circular
1001 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
1002
1003 // Find the first lane that we will start our search from.
1004 unsigned FirstLane = getBestLaneToStartReordering();
1005
1006 // Initialize the modes.
1007 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1008 Value *OpLane0 = getValue(OpIdx, FirstLane);
1009 // Keep track if we have instructions with all the same opcode on one
1010 // side.
1011 if (isa<LoadInst>(OpLane0))
1012 ReorderingModes[OpIdx] = ReorderingMode::Load;
1013 else if (isa<Instruction>(OpLane0)) {
1014 // Check if OpLane0 should be broadcast.
1015 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1016 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1017 else
1018 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1019 }
1020 else if (isa<Constant>(OpLane0))
1021 ReorderingModes[OpIdx] = ReorderingMode::Constant;
1022 else if (isa<Argument>(OpLane0))
1023 // Our best hope is a Splat. It may save some cost in some cases.
1024 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1025 else
1026 // NOTE: This should be unreachable.
1027 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1028 }
1029
1030 // If the initial strategy fails for any of the operand indexes, then we
1031 // perform reordering again in a second pass. This helps avoid assigning
1032 // high priority to the failed strategy, and should improve reordering for
1033 // the non-failed operand indexes.
1034 for (int Pass = 0; Pass != 2; ++Pass) {
1035 // Skip the second pass if the first pass did not fail.
1036 bool StrategyFailed = false;
1037 // Mark all operand data as free to use.
1038 clearUsed();
1039 // We keep the original operand order for the FirstLane, so reorder the
1040 // rest of the lanes. We are visiting the nodes in a circular fashion,
1041 // using FirstLane as the center point and increasing the radius
1042 // distance.
1043 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1044 // Visit the lane on the right and then the lane on the left.
1045 for (int Direction : {+1, -1}) {
1046 int Lane = FirstLane + Direction * Distance;
1047 if (Lane < 0 || Lane >= (int)NumLanes)
1048 continue;
1049 int LastLane = Lane - Direction;
1050 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1051 "Out of bounds");
1052 // Look for a good match for each operand.
1053 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1054 // Search for the operand that matches SortedOps[OpIdx][Lane-1].
1055 Optional<unsigned> BestIdx =
1056 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
1057 // By not selecting a value, we allow the operands that follow to
1058 // select a better matching value. We will get a non-null value in
1059 // the next run of getBestOperand().
1060 if (BestIdx) {
1061 // Swap the current operand with the one returned by
1062 // getBestOperand().
1063 swap(OpIdx, BestIdx.getValue(), Lane);
1064 } else {
1065 // We failed to find a best operand, set mode to 'Failed'.
1066 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1067 // Enable the second pass.
1068 StrategyFailed = true;
1069 }
1070 }
1071 }
1072 }
1073 // Skip second pass if the strategy did not fail.
1074 if (!StrategyFailed)
1075 break;
1076 }
1077 }
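
Just the circular lane-visiting order used by reorder() can be isolated in a small hypothetical sketch: starting from the chosen first lane, lanes are visited at increasing distance, right neighbour before left neighbour.

// Hypothetical illustration of the visiting order in reorder(). FirstLane
// keeps its original operand order; the others are visited outwards from it.
#include <cstdio>
#include <initializer_list>
#include <vector>

std::vector<int> visitOrder(int FirstLane, int NumLanes) {
  std::vector<int> Order;
  for (int Distance = 1; Distance != NumLanes; ++Distance)
    for (int Direction : {+1, -1}) {
      int Lane = FirstLane + Direction * Distance;
      if (Lane >= 0 && Lane < NumLanes)
        Order.push_back(Lane);
    }
  return Order;
}

int main() {
  // With 4 lanes and FirstLane = 1 (as in the example above) the remaining
  // lanes are visited in the order 2, 0, 3.
  for (int Lane : visitOrder(1, 4))
    std::printf("%d ", Lane);
  std::printf("\n");
}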
1078
1079#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1080 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1081 switch (RMode) {
1082 case ReorderingMode::Load:
1083 return "Load";
1084 case ReorderingMode::Opcode:
1085 return "Opcode";
1086 case ReorderingMode::Constant:
1087 return "Constant";
1088 case ReorderingMode::Splat:
1089 return "Splat";
1090 case ReorderingMode::Failed:
1091 return "Failed";
1092 }
1093 llvm_unreachable("Unimplemented Reordering Type");
1094 }
1095
1096 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1097 raw_ostream &OS) {
1098 return OS << getModeStr(RMode);
1099 }
1100
1101 /// Debug print.
1102 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1103 printMode(RMode, dbgs());
1104 }
1105
1106 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1107 return printMode(RMode, OS);
1108 }
1109
1110 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1111 const unsigned Indent = 2;
1112 unsigned Cnt = 0;
1113 for (const OperandDataVec &OpDataVec : OpsVec) {
1114 OS << "Operand " << Cnt++ << "\n";
1115 for (const OperandData &OpData : OpDataVec) {
1116 OS.indent(Indent) << "{";
1117 if (Value *V = OpData.V)
1118 OS << *V;
1119 else
1120 OS << "null";
1121 OS << ", APO:" << OpData.APO << "}\n";
1122 }
1123 OS << "\n";
1124 }
1125 return OS;
1126 }
1127
1128 /// Debug print.
1129 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1130#endif
1131 };
1132
1133 /// Checks if the instruction is marked for deletion.
1134 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1135
1136 /// Marks the operand values for later deletion by replacing them with Undefs.
1137 void eraseInstructions(ArrayRef<Value *> AV);
1138
1139 ~BoUpSLP();
1140
1141private:
1142 /// Checks if all users of \p I are the part of the vectorization tree.
1143 bool areAllUsersVectorized(Instruction *I) const;
1144
1145 /// \returns the cost of the vectorizable entry.
1146 int getEntryCost(TreeEntry *E);
1147
1148 /// This is the recursive part of buildTree.
1149 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1150 const EdgeInfo &EI);
1151
1152 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1153 /// be vectorized to use the original vector (or aggregate "bitcast" to a
1154 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1155 /// returns false, setting \p CurrentOrder to either an empty vector or a
1156 /// non-identity permutation that allows to reuse extract instructions.
1157 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1158 SmallVectorImpl<unsigned> &CurrentOrder) const;
1159
1160 /// Vectorize a single entry in the tree.
1161 Value *vectorizeTree(TreeEntry *E);
1162
1163 /// Vectorize a single entry in the tree, starting in \p VL.
1164 Value *vectorizeTree(ArrayRef<Value *> VL);
1165
1166 /// \returns the scalarization cost for this type. Scalarization in this
1167 /// context means the creation of vectors from a group of scalars.
1168 int getGatherCost(Type *Ty, const DenseSet<unsigned> &ShuffledIndices) const;
1169
1170 /// \returns the scalarization cost for this list of values. Assuming that
1171 /// this subtree gets vectorized, we may need to extract the values from the
1172 /// roots. This method calculates the cost of extracting the values.
1173 int getGatherCost(ArrayRef<Value *> VL) const;
1174
1175 /// Set the Builder insert point to one after the last instruction in
1176 /// the bundle
1177 void setInsertPointAfterBundle(TreeEntry *E);
1178
1179 /// \returns a vector from a collection of scalars in \p VL.
1180 Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
1181
1182 /// \returns whether the VectorizableTree is fully vectorizable and will
1183 /// be beneficial even the tree height is tiny.
1184 bool isFullyVectorizableTinyTree() const;
1185
1186 /// Reorder commutative or alt operands to get better probability of
1187 /// generating vectorized code.
1188 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1189 SmallVectorImpl<Value *> &Left,
1190 SmallVectorImpl<Value *> &Right,
1191 const DataLayout &DL,
1192 ScalarEvolution &SE);
1193 struct TreeEntry {
1194 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1195 TreeEntry(VecTreeTy &Container) : Container(Container) {}
1196
1197 /// \returns true if the scalars in VL are equal to this entry.
1198 bool isSame(ArrayRef<Value *> VL) const {
1199 if (VL.size() == Scalars.size())
1200 return std::equal(VL.begin(), VL.end(), Scalars.begin());
1201 return VL.size() == ReuseShuffleIndices.size() &&
1202 std::equal(
1203 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
1204 [this](Value *V, unsigned Idx) { return V == Scalars[Idx]; });
1205 }
1206
1207 /// A vector of scalars.
1208 ValueList Scalars;
1209
1210 /// The Scalars are vectorized into this value. It is initialized to Null.
1211 Value *VectorizedValue = nullptr;
1212
1213 /// Do we need to gather this sequence ?
1214 bool NeedToGather = false;
1215
1216 /// Does this sequence require some shuffling?
1217 SmallVector<unsigned, 4> ReuseShuffleIndices;
1218
1219 /// Does this entry require reordering?
1220 ArrayRef<unsigned> ReorderIndices;
1221
1222 /// Points back to the VectorizableTree.
1223 ///
1224 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
1225 /// to be a pointer and needs to be able to initialize the child iterator.
1226 /// Thus we need a reference back to the container to translate the indices
1227 /// to entries.
1228 VecTreeTy &Container;
1229
1230 /// The TreeEntry index containing the user of this entry. We can actually
1231 /// have multiple users so the data structure is not truly a tree.
1232 SmallVector<EdgeInfo, 1> UserTreeIndices;
1233
1234 /// The index of this treeEntry in VectorizableTree.
1235 int Idx = -1;
1236
1237 private:
1238 /// The operands of each instruction in each lane Operands[op_index][lane].
1239 /// Note: This helps avoid the replication of the code that performs the
1240 /// reordering of operands during buildTree_rec() and vectorizeTree().
1241 SmallVector<ValueList, 2> Operands;
1242
1243 /// The main/alternate instruction.
1244 Instruction *MainOp = nullptr;
1245 Instruction *AltOp = nullptr;
1246
1247 public:
1248 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
1249 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
1250 if (Operands.size() < OpIdx + 1)
1251 Operands.resize(OpIdx + 1);
1252 assert(Operands[OpIdx].size() == 0 && "Already resized?");
1253 Operands[OpIdx].resize(Scalars.size());
1254 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
1255 Operands[OpIdx][Lane] = OpVL[Lane];
1256 }
1257
1258 /// Set the operands of this bundle in their original order.
1259 void setOperandsInOrder() {
1260 assert(Operands.empty() && "Already initialized?");
1261 auto *I0 = cast<Instruction>(Scalars[0]);
1262 Operands.resize(I0->getNumOperands());
1263 unsigned NumLanes = Scalars.size();
1264 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
1265 OpIdx != NumOperands; ++OpIdx) {
1266 Operands[OpIdx].resize(NumLanes);
1267 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1268 auto *I = cast<Instruction>(Scalars[Lane]);
1269 assert(I->getNumOperands() == NumOperands &&
1270 "Expected same number of operands");
1271 Operands[OpIdx][Lane] = I->getOperand(OpIdx);
1272 }
1273 }
1274 }
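setOperandsInOrder() above transposes the per-lane operands into Operands[op_index][lane]. A standalone sketch of that transpose, using plain containers and made-up values rather than LLVM IR:

#include <cassert>
#include <vector>

int main() {
  // Two lanes; each "instruction" has two operands (ints stand in for IR values).
  std::vector<std::vector<int>> LaneOperands = {{1, 2}, {3, 4}};
  unsigned NumOperands = 2, NumLanes = 2;
  std::vector<std::vector<int>> Operands(NumOperands, std::vector<int>(NumLanes));
  for (unsigned Op = 0; Op != NumOperands; ++Op)
    for (unsigned Lane = 0; Lane != NumLanes; ++Lane)
      Operands[Op][Lane] = LaneOperands[Lane][Op]; // lane-major to op-major
  assert(Operands[0][1] == 3 && Operands[1][0] == 2);
  return 0;
}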
1275
1276 /// \returns the \p OpIdx operand of this TreeEntry.
1277 ValueList &getOperand(unsigned OpIdx) {
1278 assert(OpIdx < Operands.size() && "Off bounds");
1279 return Operands[OpIdx];
1280 }
1281
1282 /// \returns the number of operands.
1283 unsigned getNumOperands() const { return Operands.size(); }
1284
1285 /// \return the single \p OpIdx operand.
1286 Value *getSingleOperand(unsigned OpIdx) const {
1287 assert(OpIdx < Operands.size() && "Off bounds");
1288 assert(!Operands[OpIdx].empty() && "No operand available");
1289 return Operands[OpIdx][0];
1290 }
1291
1292 /// Some of the instructions in the list have alternate opcodes.
1293 bool isAltShuffle() const {
1294 return getOpcode() != getAltOpcode();
1295 }
1296
1297 bool isOpcodeOrAlt(Instruction *I) const {
1298 unsigned CheckedOpcode = I->getOpcode();
1299 return (getOpcode() == CheckedOpcode ||
1300 getAltOpcode() == CheckedOpcode);
1301 }
1302
1303 /// Chooses the correct key for scheduling data. If \p Op has the same (or
1304 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
1305 /// \p OpValue.
1306 Value *isOneOf(Value *Op) const {
1307 auto *I = dyn_cast<Instruction>(Op);
1308 if (I && isOpcodeOrAlt(I))
1309 return Op;
1310 return MainOp;
1311 }
1312
1313 void setOperations(const InstructionsState &S) {
1314 MainOp = S.MainOp;
1315 AltOp = S.AltOp;
1316 }
1317
1318 Instruction *getMainOp() const {
1319 return MainOp;
1320 }
1321
1322 Instruction *getAltOp() const {
1323 return AltOp;
1324 }
1325
1326 /// The main/alternate opcodes for the list of instructions.
1327 unsigned getOpcode() const {
1328 return MainOp ? MainOp->getOpcode() : 0;
1329 }
1330
1331 unsigned getAltOpcode() const {
1332 return AltOp ? AltOp->getOpcode() : 0;
1333 }
1334
1335 /// Update operations state of this entry if reorder occurred.
1336 bool updateStateIfReorder() {
1337 if (ReorderIndices.empty())
1338 return false;
1339 InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front());
1340 setOperations(S);
1341 return true;
1342 }
1343
1344#ifndef NDEBUG
1345 /// Debug printer.
1346 LLVM_DUMP_METHOD void dump() const {
1347 dbgs() << Idx << ".\n";
1348 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
1349 dbgs() << "Operand " << OpI << ":\n";
1350 for (const Value *V : Operands[OpI])
1351 dbgs().indent(2) << *V << "\n";
1352 }
1353 dbgs() << "Scalars: \n";
1354 for (Value *V : Scalars)
1355 dbgs().indent(2) << *V << "\n";
1356 dbgs() << "NeedToGather: " << NeedToGather << "\n";
1357 dbgs() << "MainOp: ";
1358 if (MainOp)
1359 dbgs() << *MainOp << "\n";
1360 else
1361 dbgs() << "NULL\n";
1362 dbgs() << "AltOp: ";
1363 if (AltOp)
1364 dbgs() << *AltOp << "\n";
1365 else
1366 dbgs() << "NULL\n";
1367 dbgs() << "VectorizedValue: ";
1368 if (VectorizedValue)
1369 dbgs() << *VectorizedValue << "\n";
1370 else
1371 dbgs() << "NULL\n";
1372 dbgs() << "ReuseShuffleIndices: ";
1373 if (ReuseShuffleIndices.empty())
1374 dbgs() << "Empty";
1375 else
1376 for (unsigned ReuseIdx : ReuseShuffleIndices)
1377 dbgs() << ReuseIdx << ", ";
1378 dbgs() << "\n";
1379 dbgs() << "ReorderIndices: ";
1380 for (unsigned ReorderIdx : ReorderIndices)
1381 dbgs() << ReorderIdx << ", ";
1382 dbgs() << "\n";
1383 dbgs() << "UserTreeIndices: ";
1384 for (const auto &EInfo : UserTreeIndices)
1385 dbgs() << EInfo << ", ";
1386 dbgs() << "\n";
1387 }
1388#endif
1389 };
1390
1391 /// Create a new VectorizableTree entry.
1392 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
1393 const InstructionsState &S,
1394 const EdgeInfo &UserTreeIdx,
1395 ArrayRef<unsigned> ReuseShuffleIndices = None,
1396 ArrayRef<unsigned> ReorderIndices = None) {
1397 bool Vectorized = (bool)Bundle;
1398 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
1399 TreeEntry *Last = VectorizableTree.back().get();
1400 Last->Idx = VectorizableTree.size() - 1;
1401 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
1402 Last->NeedToGather = !Vectorized;
1403 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
1404 ReuseShuffleIndices.end());
1405 Last->ReorderIndices = ReorderIndices;
1406 Last->setOperations(S);
1407 if (Vectorized) {
1408 for (int i = 0, e = VL.size(); i != e; ++i) {
1409 assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
1410 ScalarToTreeEntry[VL[i]] = Last;
1411 }
1412 // Update the scheduler bundle to point to this TreeEntry.
1413 unsigned Lane = 0;
1414 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
1415 BundleMember = BundleMember->NextInBundle) {
1416 BundleMember->TE = Last;
1417 BundleMember->Lane = Lane;
1418 ++Lane;
1419 }
1420 assert((!Bundle.getValue() || Lane == VL.size()) &&
1421 "Bundle and VL out of sync");
1422 } else {
1423 MustGather.insert(VL.begin(), VL.end());
1424 }
1425
1426 if (UserTreeIdx.UserTE)
1427 Last->UserTreeIndices.push_back(UserTreeIdx);
1428
1429 return Last;
1430 }
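newTreeEntry() above numbers the members of a scheduler bundle by walking the NextInBundle chain. A standalone sketch of that lane assignment over a hypothetical node type:

#include <cassert>

struct Node { Node *NextInBundle = nullptr; int Lane = -1; }; // hypothetical stand-in

int main() {
  Node A, B, C;                 // a three-member bundle
  A.NextInBundle = &B;
  B.NextInBundle = &C;
  int Lane = 0;
  for (Node *N = &A; N; N = N->NextInBundle)
    N->Lane = Lane++;           // number the members 0..N-1
  assert(A.Lane == 0 && C.Lane == 2 && Lane == 3);
  return 0;
}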
1431
1432 /// -- Vectorization State --
1433 /// Holds all of the tree entries.
1434 TreeEntry::VecTreeTy VectorizableTree;
1435
1436#ifndef NDEBUG
1437 /// Debug printer.
1438 LLVM_DUMP_METHOD void dumpVectorizableTree() const {
1439 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
1440 VectorizableTree[Id]->dump();
1441 dbgs() << "\n";
1442 }
1443 }
1444#endif
1445
1446 TreeEntry *getTreeEntry(Value *V) {
1447 auto I = ScalarToTreeEntry.find(V);
1448 if (I != ScalarToTreeEntry.end())
1449 return I->second;
1450 return nullptr;
1451 }
1452
1453 const TreeEntry *getTreeEntry(Value *V) const {
1454 auto I = ScalarToTreeEntry.find(V);
1455 if (I != ScalarToTreeEntry.end())
1456 return I->second;
1457 return nullptr;
1458 }
1459
1460 /// Maps a specific scalar to its tree entry.
1461 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
1462
1463 /// A list of scalars that we found that we need to keep as scalars.
1464 ValueSet MustGather;
1465
1466 /// This POD struct describes one external user in the vectorized tree.
1467 struct ExternalUser {
1468 ExternalUser(Value *S, llvm::User *U, int L)
1469 : Scalar(S), User(U), Lane(L) {}
1470
1471 // Which scalar in our function.
1472 Value *Scalar;
1473
1474 // Which user that uses the scalar.
1475 llvm::User *User;
1476
1477 // Which lane does the scalar belong to.
1478 int Lane;
1479 };
1480 using UserList = SmallVector<ExternalUser, 16>;
1481
1482 /// Checks if two instructions may access the same memory.
1483 ///
1484 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
1485 /// is invariant in the calling loop.
1486 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
1487 Instruction *Inst2) {
1488 // First check if the result is already in the cache.
1489 AliasCacheKey key = std::make_pair(Inst1, Inst2);
1490 Optional<bool> &result = AliasCache[key];
1491 if (result.hasValue()) {
1492 return result.getValue();
1493 }
1494 MemoryLocation Loc2 = getLocation(Inst2, AA);
1495 bool aliased = true;
1496 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
1497 // Do the alias check.
1498 aliased = AA->alias(Loc1, Loc2);
1499 }
1500 // Store the result in the cache.
1501 result = aliased;
1502 return aliased;
1503 }
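isAliased() above memoizes an expensive query per instruction pair. A standalone sketch of the same caching pattern, with std::optional and a hypothetical query function in place of the alias analysis:

#include <cassert>
#include <map>
#include <optional>
#include <utility>

static int Calls = 0;
static bool expensiveAlias(int A, int B) { ++Calls; return A == B; } // hypothetical query

int main() {
  std::map<std::pair<int, int>, std::optional<bool>> Cache;
  auto isAliased = [&](int A, int B) {
    std::optional<bool> &R = Cache[std::make_pair(A, B)]; // empty on first use
    if (!R)
      R = expensiveAlias(A, B);                           // compute once per pair
    return *R;
  };
  isAliased(1, 2);
  isAliased(1, 2); // answered from the cache
  assert(Calls == 1);
  return 0;
}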
1504
1505 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
1506
1507 /// Cache for alias results.
1508 /// TODO: consider moving this to the AliasAnalysis itself.
1509 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
1510
1511 /// Removes an instruction from its block and eventually deletes it.
1512 /// It's like Instruction::eraseFromParent() except that the actual deletion
1513 /// is delayed until BoUpSLP is destructed.
1514 /// This is required to ensure that there are no incorrect collisions in the
1515 /// AliasCache, which can happen if a new instruction is allocated at the
1516 /// same address as a previously deleted instruction.
1517 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
1518 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
1519 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
1520 }
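eraseInstruction() above only records the instruction; actual deletion is deferred to the destructor so a freed address cannot collide with a live key in AliasCache. A standalone sketch of that deferred-deletion idea with hypothetical types:

#include <cassert>
#include <memory>
#include <vector>

struct Inst { int Id; }; // hypothetical stand-in for llvm::Instruction

int main() {
  std::vector<std::unique_ptr<Inst>> Owner;
  Owner.push_back(std::make_unique<Inst>(Inst{7}));
  std::vector<Inst *> DeletedInstructions;
  DeletedInstructions.push_back(Owner[0].get()); // "erase": only remember it for now
  assert(Owner[0]->Id == 7);                     // still a valid, unique address
  // Real destruction happens only when Owner is destroyed at the end of main,
  // mirroring how ~BoUpSLP() drains DeletedInstructions.
  return 0;
}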
1521
1522 /// Temporary store for deleted instructions. Instructions will be deleted
1523 /// eventually when the BoUpSLP is destructed.
1524 DenseMap<Instruction *, bool> DeletedInstructions;
1525
1526 /// A list of values that need to be extracted out of the tree.
1527 /// This list holds pairs of (Internal Scalar : External User). External User
1528 /// can be nullptr, it means that this Internal Scalar will be used later,
1529 /// after vectorization.
1530 UserList ExternalUses;
1531
1532 /// Values used only by @llvm.assume calls.
1533 SmallPtrSet<const Value *, 32> EphValues;
1534
1535 /// Holds all of the instructions that we gathered.
1536 SetVector<Instruction *> GatherSeq;
1537
1538 /// A list of blocks that we are going to CSE.
1539 SetVector<BasicBlock *> CSEBlocks;
1540
1541 /// Contains all scheduling relevant data for an instruction.
1542 /// A ScheduleData either represents a single instruction or a member of an
1543 /// instruction bundle (= a group of instructions which is combined into a
1544 /// vector instruction).
1545 struct ScheduleData {
1546 // The initial value for the dependency counters. It means that the
1547 // dependencies are not calculated yet.
1548 enum { InvalidDeps = -1 };
1549
1550 ScheduleData() = default;
1551
1552 void init(int BlockSchedulingRegionID, Value *OpVal) {
1553 FirstInBundle = this;
1554 NextInBundle = nullptr;
1555 NextLoadStore = nullptr;
1556 IsScheduled = false;
1557 SchedulingRegionID = BlockSchedulingRegionID;
1558 UnscheduledDepsInBundle = UnscheduledDeps;
1559 clearDependencies();
1560 OpValue = OpVal;
1561 TE = nullptr;
1562 Lane = -1;
1563 }
1564
1565 /// Returns true if the dependency information has been calculated.
1566 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
1567
1568 /// Returns true for single instructions and for bundle representatives
1569 /// (= the head of a bundle).
1570 bool isSchedulingEntity() const { return FirstInBundle == this; }
1571
1572 /// Returns true if it represents an instruction bundle and not only a
1573 /// single instruction.
1574 bool isPartOfBundle() const {
1575 return NextInBundle != nullptr || FirstInBundle != this;
1576 }
1577
1578 /// Returns true if it is ready for scheduling, i.e. it has no more
1579 /// unscheduled depending instructions/bundles.
1580 bool isReady() const {
1581 assert(isSchedulingEntity() &&
1582 "can't consider non-scheduling entity for ready list");
1583 return UnscheduledDepsInBundle == 0 && !IsScheduled;
1584 }
1585
1586 /// Modifies the number of unscheduled dependencies, also updating it for
1587 /// the whole bundle.
1588 int incrementUnscheduledDeps(int Incr) {
1589 UnscheduledDeps += Incr;
1590 return FirstInBundle->UnscheduledDepsInBundle += Incr;
1591 }
1592
1593 /// Sets the number of unscheduled dependencies to the number of
1594 /// dependencies.
1595 void resetUnscheduledDeps() {
1596 incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
1597 }
1598
1599 /// Clears all dependency information.
1600 void clearDependencies() {
1601 Dependencies = InvalidDeps;
1602 resetUnscheduledDeps();
1603 MemoryDependencies.clear();
1604 }
1605
1606 void dump(raw_ostream &os) const {
1607 if (!isSchedulingEntity()) {
1608 os << "/ " << *Inst;
1609 } else if (NextInBundle) {
1610 os << '[' << *Inst;
1611 ScheduleData *SD = NextInBundle;
1612 while (SD) {
1613 os << ';' << *SD->Inst;
1614 SD = SD->NextInBundle;
1615 }
1616 os << ']';
1617 } else {
1618 os << *Inst;
1619 }
1620 }
1621
1622 Instruction *Inst = nullptr;
1623
1624 /// Points to the head in an instruction bundle (and always to this for
1625 /// single instructions).
1626 ScheduleData *FirstInBundle = nullptr;
1627
1628 /// Single linked list of all instructions in a bundle. Null if it is a
1629 /// single instruction.
1630 ScheduleData *NextInBundle = nullptr;
1631
1632 /// Single linked list of all memory instructions (e.g. load, store, call)
1633 /// in the block - until the end of the scheduling region.
1634 ScheduleData *NextLoadStore = nullptr;
1635
1636 /// The dependent memory instructions.
1637 /// This list is derived on demand in calculateDependencies().
1638 SmallVector<ScheduleData *, 4> MemoryDependencies;
1639
1640 /// This ScheduleData is in the current scheduling region if this matches
1641 /// the current SchedulingRegionID of BlockScheduling.
1642 int SchedulingRegionID = 0;
1643
1644 /// Used for getting a "good" final ordering of instructions.
1645 int SchedulingPriority = 0;
1646
1647 /// The number of dependencies. It consists of the number of users of the
1648 /// instruction plus the number of dependent memory instructions (if any).
1649 /// This value is calculated on demand.
1650 /// If InvalidDeps, the number of dependencies is not calculated yet.
1651 int Dependencies = InvalidDeps;
1652
1653 /// The number of dependencies minus the number of dependencies of scheduled
1654 /// instructions. As soon as this is zero, the instruction/bundle gets ready
1655 /// for scheduling.
1656 /// Note that this is negative as long as Dependencies is not calculated.
1657 int UnscheduledDeps = InvalidDeps;
1658
1659 /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for
1660 /// single instructions.
1661 int UnscheduledDepsInBundle = InvalidDeps;
1662
1663 /// True if this instruction is scheduled (or considered as scheduled in the
1664 /// dry-run).
1665 bool IsScheduled = false;
1666
1667 /// Opcode of the current instruction in the schedule data.
1668 Value *OpValue = nullptr;
1669
1670 /// The TreeEntry that this instruction corresponds to.
1671 TreeEntry *TE = nullptr;
1672
1673 /// The lane of this node in the TreeEntry.
1674 int Lane = -1;
1675 };
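The UnscheduledDeps / UnscheduledDepsInBundle counters above implement a simple protocol: a node becomes ready once its unscheduled-dependency count drops to zero. A standalone sketch of that counter protocol with plain ints:

#include <cassert>

int main() {
  int UnscheduledDeps = 2;        // two dependencies still unscheduled
  bool IsScheduled = false;
  auto DependencyScheduled = [&] { return --UnscheduledDeps == 0; };
  assert(!DependencyScheduled()); // one dependency left, not ready yet
  assert(DependencyScheduled());  // counter hit zero: ready for scheduling
  IsScheduled = true;
  assert(IsScheduled && UnscheduledDeps == 0);
  return 0;
}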
1676
1677#ifndef NDEBUG
1678 friend inline raw_ostream &operator<<(raw_ostream &os,
1679 const BoUpSLP::ScheduleData &SD) {
1680 SD.dump(os);
1681 return os;
1682 }
1683#endif
1684
1685 friend struct GraphTraits<BoUpSLP *>;
1686 friend struct DOTGraphTraits<BoUpSLP *>;
1687
1688 /// Contains all scheduling data for a basic block.
1689 struct BlockScheduling {
1690 BlockScheduling(BasicBlock *BB)
1691 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
1692
1693 void clear() {
1694 ReadyInsts.clear();
1695 ScheduleStart = nullptr;
1696 ScheduleEnd = nullptr;
1697 FirstLoadStoreInRegion = nullptr;
1698 LastLoadStoreInRegion = nullptr;
1699
1700 // Reduce the maximum schedule region size by the size of the
1701 // previous scheduling run.
1702 ScheduleRegionSizeLimit -= ScheduleRegionSize;
1703 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
1704 ScheduleRegionSizeLimit = MinScheduleRegionSize;
1705 ScheduleRegionSize = 0;
1706
1707 // Make a new scheduling region, i.e. all existing ScheduleData is not
1708 // in the new region yet.
1709 ++SchedulingRegionID;
1710 }
1711
1712 ScheduleData *getScheduleData(Value *V) {
1713 ScheduleData *SD = ScheduleDataMap[V];
1714 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
1715 return SD;
1716 return nullptr;
1717 }
1718
1719 ScheduleData *getScheduleData(Value *V, Value *Key) {
1720 if (V == Key)
1721 return getScheduleData(V);
1722 auto I = ExtraScheduleDataMap.find(V);
1723 if (I != ExtraScheduleDataMap.end()) {
1724 ScheduleData *SD = I->second[Key];
1725 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
1726 return SD;
1727 }
1728 return nullptr;
1729 }
1730
1731 bool isInSchedulingRegion(ScheduleData *SD) {
1732 return SD->SchedulingRegionID == SchedulingRegionID;
1733 }
1734
1735 /// Marks an instruction as scheduled and puts all dependent ready
1736 /// instructions into the ready-list.
1737 template <typename ReadyListType>
1738 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
1739 SD->IsScheduled = true;
1740 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
1741
1742 ScheduleData *BundleMember = SD;
1743 while (BundleMember) {
1744 if (BundleMember->Inst != BundleMember->OpValue) {
1745 BundleMember = BundleMember->NextInBundle;
1746 continue;
1747 }
1748 // Handle the def-use chain dependencies.
1749
1750 // Decrement the unscheduled counter and insert to ready list if ready.
1751 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
1752 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
1753 if (OpDef && OpDef->hasValidDependencies() &&
1754 OpDef->incrementUnscheduledDeps(-1) == 0) {
1755 // There are no more unscheduled dependencies after
1756 // decrementing, so we can put the dependent instruction
1757 // into the ready list.
1758 ScheduleData *DepBundle = OpDef->FirstInBundle;
1759 assert(!DepBundle->IsScheduled &&
1760 "already scheduled bundle gets ready");
1761 ReadyList.insert(DepBundle);
1762 LLVM_DEBUG(dbgs()
1763 << "SLP: gets ready (def): " << *DepBundle << "\n");
1764 }
1765 });
1766 };
1767
1768 // If BundleMember is a vector bundle, its operands may have been
1769 // reordered during buildTree(). We therefore need to get its operands
1770 // through the TreeEntry.
1771 if (TreeEntry *TE = BundleMember->TE) {
1772 int Lane = BundleMember->Lane;
1773 assert(Lane >= 0 && "Lane not set");
1774 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
1775 OpIdx != NumOperands; ++OpIdx)
1776 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
1777 DecrUnsched(I);
1778 } else {
1779 // If BundleMember is a stand-alone instruction, no operand reordering
1780 // has taken place, so we directly access its operands.
1781 for (Use &U : BundleMember->Inst->operands())
1782 if (auto *I = dyn_cast<Instruction>(U.get()))
1783 DecrUnsched(I);
1784 }
1785 // Handle the memory dependencies.
1786 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
1787 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
1788 // There are no more unscheduled dependencies after decrementing,
1789 // so we can put the dependent instruction into the ready list.
1790 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
1791 assert(!DepBundle->IsScheduled &&
1792 "already scheduled bundle gets ready");
1793 ReadyList.insert(DepBundle);
1794 LLVM_DEBUG(dbgs()
1795 << "SLP: gets ready (mem): " << *DepBundle << "\n");
1796 }
1797 }
1798 BundleMember = BundleMember->NextInBundle;
1799 }
1800 }
1801
1802 void doForAllOpcodes(Value *V,
1803 function_ref<void(ScheduleData *SD)> Action) {
1804 if (ScheduleData *SD = getScheduleData(V))
1805 Action(SD);
1806 auto I = ExtraScheduleDataMap.find(V);
1807 if (I != ExtraScheduleDataMap.end())
1808 for (auto &P : I->second)
1809 if (P.second->SchedulingRegionID == SchedulingRegionID)
1810 Action(P.second);
1811 }
1812
1813 /// Put all instructions into the ReadyList which are ready for scheduling.
1814 template <typename ReadyListType>
1815 void initialFillReadyList(ReadyListType &ReadyList) {
1816 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
1817 doForAllOpcodes(I, [&](ScheduleData *SD) {
1818 if (SD->isSchedulingEntity() && SD->isReady()) {
1819 ReadyList.insert(SD);
1820 LLVM_DEBUG(dbgs()
1821 << "SLP: initially in ready list: " << *I << "\n");
1822 }
1823 });
1824 }
1825 }
1826
1827 /// Checks if a bundle of instructions can be scheduled, i.e. has no
1828 /// cyclic dependencies. This is only a dry-run, no instructions are
1829 /// actually moved at this stage.
1830 /// \returns the scheduling bundle. The returned Optional value is non-None
1831 /// if \p VL is allowed to be scheduled.
1832 Optional<ScheduleData *>
1833 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
1834 const InstructionsState &S);
1835
1836 /// Un-bundles a group of instructions.
1837 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
1838
1839 /// Allocates schedule data chunk.
1840 ScheduleData *allocateScheduleDataChunks();
1841
1842 /// Extends the scheduling region so that V is inside the region.
1843 /// \returns true if the region size is within the limit.
1844 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
1845
1846 /// Initialize the ScheduleData structures for new instructions in the
1847 /// scheduling region.
1848 void initScheduleData(Instruction *FromI, Instruction *ToI,
1849 ScheduleData *PrevLoadStore,
1850 ScheduleData *NextLoadStore);
1851
1852 /// Updates the dependency information of a bundle and of all instructions/
1853 /// bundles which depend on the original bundle.
1854 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
1855 BoUpSLP *SLP);
1856
1857 /// Sets all instructions in the scheduling region to un-scheduled.
1858 void resetSchedule();
1859
1860 BasicBlock *BB;
1861
1862 /// Simple memory allocation for ScheduleData.
1863 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
1864
1865 /// The size of a ScheduleData array in ScheduleDataChunks.
1866 int ChunkSize;
1867
1868 /// The allocator position in the current chunk, which is the last entry
1869 /// of ScheduleDataChunks.
1870 int ChunkPos;
1871
1872 /// Attaches ScheduleData to Instruction.
1873 /// Note that the mapping survives during all vectorization iterations, i.e.
1874 /// ScheduleData structures are recycled.
1875 DenseMap<Value *, ScheduleData *> ScheduleDataMap;
1876
1877 /// Attaches ScheduleData to Instruction with the leading key.
1878 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
1879 ExtraScheduleDataMap;
1880
1881 struct ReadyList : SmallVector<ScheduleData *, 8> {
1882 void insert(ScheduleData *SD) { push_back(SD); }
1883 };
1884
1885 /// The ready-list for scheduling (only used for the dry-run).
1886 ReadyList ReadyInsts;
1887
1888 /// The first instruction of the scheduling region.
1889 Instruction *ScheduleStart = nullptr;
1890
1891 /// The first instruction _after_ the scheduling region.
1892 Instruction *ScheduleEnd = nullptr;
1893
1894 /// The first memory accessing instruction in the scheduling region
1895 /// (can be null).
1896 ScheduleData *FirstLoadStoreInRegion = nullptr;
1897
1898 /// The last memory accessing instruction in the scheduling region
1899 /// (can be null).
1900 ScheduleData *LastLoadStoreInRegion = nullptr;
1901
1902 /// The current size of the scheduling region.
1903 int ScheduleRegionSize = 0;
1904
1905 /// The maximum size allowed for the scheduling region.
1906 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
1907
1908 /// The ID of the scheduling region. For a new vectorization iteration this
1909 /// is incremented which "removes" all ScheduleData from the region.
1910 // Make sure that the initial SchedulingRegionID is greater than the
1911 // initial SchedulingRegionID in ScheduleData (which is 0).
1912 int SchedulingRegionID = 1;
1913 };
1914
1915 /// Attaches the BlockScheduling structures to basic blocks.
1916 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
1917
1918 /// Performs the "real" scheduling. Done before vectorization is actually
1919 /// performed in a basic block.
1920 void scheduleBlock(BlockScheduling *BS);
1921
1922 /// List of users to ignore during scheduling and that don't need extracting.
1923 ArrayRef<Value *> UserIgnoreList;
1924
1925 using OrdersType = SmallVector<unsigned, 4>;
1926 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
1927 /// sorted SmallVectors of unsigned.
1928 struct OrdersTypeDenseMapInfo {
1929 static OrdersType getEmptyKey() {
1930 OrdersType V;
1931 V.push_back(~1U);
1932 return V;
1933 }
1934
1935 static OrdersType getTombstoneKey() {
1936 OrdersType V;
1937 V.push_back(~2U);
1938 return V;
1939 }
1940
1941 static unsigned getHashValue(const OrdersType &V) {
1942 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
1943 }
1944
1945 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
1946 return LHS == RHS;
1947 }
1948 };
1949
1950 /// Contains orders of operations along with the number of bundles that have
1951 /// operations in this order. It stores only those orders that require
1952 /// reordering, if reordering is not required it is counted using \a
1953 /// NumOpsWantToKeepOriginalOrder.
1954 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;
1955 /// Number of bundles that do not require reordering.
1956 unsigned NumOpsWantToKeepOriginalOrder = 0;
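NumOpsWantToKeepOrder and NumOpsWantToKeepOriginalOrder above vote on the most profitable ordering across bundles. A standalone sketch of that bookkeeping, with std::map standing in for the DenseMap and a made-up permutation:

#include <cassert>
#include <map>
#include <vector>

int main() {
  std::map<std::vector<unsigned>, unsigned> NumOpsWantToKeepOrder;
  unsigned NumOpsWantToKeepOriginalOrder = 0;
  std::vector<unsigned> Order = {1, 0, 3, 2}; // a made-up permutation
  ++NumOpsWantToKeepOrder[Order];             // one bundle wants this order
  ++NumOpsWantToKeepOrder[Order];             // a second bundle wants it too
  ++NumOpsWantToKeepOriginalOrder;            // one bundle is already in order
  assert(NumOpsWantToKeepOrder[Order] == 2 && NumOpsWantToKeepOriginalOrder == 1);
  return 0;
}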
1957
1958 // Analysis and block reference.
1959 Function *F;
1960 ScalarEvolution *SE;
1961 TargetTransformInfo *TTI;
1962 TargetLibraryInfo *TLI;
1963 AliasAnalysis *AA;
1964 LoopInfo *LI;
1965 DominatorTree *DT;
1966 AssumptionCache *AC;
1967 DemandedBits *DB;
1968 const DataLayout *DL;
1969 OptimizationRemarkEmitter *ORE;
1970
1971 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
1972 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
1973
1974 /// Instruction builder to construct the vectorized tree.
1975 IRBuilder<> Builder;
1976
1977 /// A map of scalar integer values to the smallest bit width with which they
1978 /// can legally be represented. The values map to (width, signed) pairs,
1979 /// where "width" indicates the minimum bit width and "signed" is True if the
1980 /// value must be signed-extended, rather than zero-extended, back to its
1981 /// original width.
1982 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
1983};
1984
1985} // end namespace slpvectorizer
1986
1987template <> struct GraphTraits<BoUpSLP *> {
1988 using TreeEntry = BoUpSLP::TreeEntry;
1989
1990 /// NodeRef has to be a pointer per the GraphWriter.
1991 using NodeRef = TreeEntry *;
1992
1993 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
1994
1995 /// Add the VectorizableTree to the index iterator to be able to return
1996 /// TreeEntry pointers.
1997 struct ChildIteratorType
1998 : public iterator_adaptor_base<
1999 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
2000 ContainerTy &VectorizableTree;
2001
2002 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
2003 ContainerTy &VT)
2004 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
2005
2006 NodeRef operator*() { return I->UserTE; }
2007 };
2008
2009 static NodeRef getEntryNode(BoUpSLP &R) {
2010 return R.VectorizableTree[0].get();
2011 }
2012
2013 static ChildIteratorType child_begin(NodeRef N) {
2014 return {N->UserTreeIndices.begin(), N->Container};
2015 }
2016
2017 static ChildIteratorType child_end(NodeRef N) {
2018 return {N->UserTreeIndices.end(), N->Container};
2019 }
2020
2021 /// For the node iterator we just need to turn the TreeEntry iterator into a
2022 /// TreeEntry* iterator so that it dereferences to NodeRef.
2023 class nodes_iterator {
2024 using ItTy = ContainerTy::iterator;
2025 ItTy It;
2026
2027 public:
2028 nodes_iterator(const ItTy &It2) : It(It2) {}
2029 NodeRef operator*() { return It->get(); }
2030 nodes_iterator operator++() {
2031 ++It;
2032 return *this;
2033 }
2034 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2035 };
2036
2037 static nodes_iterator nodes_begin(BoUpSLP *R) {
2038 return nodes_iterator(R->VectorizableTree.begin());
2039 }
2040
2041 static nodes_iterator nodes_end(BoUpSLP *R) {
2042 return nodes_iterator(R->VectorizableTree.end());
2043 }
2044
2045 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2046};
2047
2048template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2049 using TreeEntry = BoUpSLP::TreeEntry;
2050
2051 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2052
2053 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2054 std::string Str;
2055 raw_string_ostream OS(Str);
2056 if (isSplat(Entry->Scalars)) {
2057 OS << "<splat> " << *Entry->Scalars[0];
2058 return Str;
2059 }
2060 for (auto V : Entry->Scalars) {
2061 OS << *V;
2062 if (std::any_of(
2063 R->ExternalUses.begin(), R->ExternalUses.end(),
2064 [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
2065 OS << " <extract>";
2066 OS << "\n";
2067 }
2068 return Str;
2069 }
2070
2071 static std::string getNodeAttributes(const TreeEntry *Entry,
2072 const BoUpSLP *) {
2073 if (Entry->NeedToGather)
2074 return "color=red";
2075 return "";
2076 }
2077};
2078
2079} // end namespace llvm
2080
2081BoUpSLP::~BoUpSLP() {
2082 for (const auto &Pair : DeletedInstructions) {
2083 // Replace operands of ignored instructions with Undefs in case they were
2084 // marked for deletion.
2085 if (Pair.getSecond()) {
2086 Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2087 Pair.getFirst()->replaceAllUsesWith(Undef);
2088 }
2089 Pair.getFirst()->dropAllReferences();
2090 }
2091 for (const auto &Pair : DeletedInstructions) {
2092 assert(Pair.getFirst()->use_empty() &&
2093 "trying to erase instruction with users.");
2094 Pair.getFirst()->eraseFromParent();
2095 }
2096}
2097
2098void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2099 for (auto *V : AV) {
2100 if (auto *I = dyn_cast<Instruction>(V))
2101 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2102 };
2103}
2104
2105void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2106 ArrayRef<Value *> UserIgnoreLst) {
2107 ExtraValueToDebugLocsMap ExternallyUsedValues;
2108 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
2109}
2110
2111void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2112 ExtraValueToDebugLocsMap &ExternallyUsedValues,
2113 ArrayRef<Value *> UserIgnoreLst) {
2114 deleteTree();
2115 UserIgnoreList = UserIgnoreLst;
2116 if (!allSameType(Roots))
2117 return;
2118 buildTree_rec(Roots, 0, EdgeInfo());
2119
2120 // Collect the values that we need to extract from the tree.
2121 for (auto &TEPtr : VectorizableTree) {
2122 TreeEntry *Entry = TEPtr.get();
2123
2124 // No need to handle users of gathered values.
2125 if (Entry->NeedToGather)
2126 continue;
2127
2128 // For each lane:
2129 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2130 Value *Scalar = Entry->Scalars[Lane];
2131 int FoundLane = Lane;
2132 if (!Entry->ReuseShuffleIndices.empty()) {
2133 FoundLane =
2134 std::distance(Entry->ReuseShuffleIndices.begin(),
2135 llvm::find(Entry->ReuseShuffleIndices, FoundLane));
2136 }
2137
2138 // Check if the scalar is externally used as an extra arg.
2139 auto ExtI = ExternallyUsedValues.find(Scalar);
2140 if (ExtI != ExternallyUsedValues.end()) {
2141 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
2142 << Lane << " from " << *Scalar << ".\n");
2143 ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
2144 }
2145 for (User *U : Scalar->users()) {
2146 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
2147
2148 Instruction *UserInst = dyn_cast<Instruction>(U);
2149 if (!UserInst)
2150 continue;
2151
2152 // Skip in-tree scalars that become vectors
2153 if (TreeEntry *UseEntry = getTreeEntry(U)) {
2154 Value *UseScalar = UseEntry->Scalars[0];
2155 // Some in-tree scalars will remain as scalar in vectorized
2156 // instructions. If that is the case, the one in Lane 0 will
2157 // be used.
2158 if (UseScalar != U ||
2159 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
2160 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
2161 << ".\n");
2162 assert(!UseEntry->NeedToGather && "Bad state");
2163 continue;
2164 }
2165 }
2166
2167 // Ignore users in the user ignore list.
2168 if (is_contained(UserIgnoreList, UserInst))
2169 continue;
2170
2171 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
2172 << Lane << " from " << *Scalar << ".\n");
2173 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
2174 }
2175 }
2176 }
2177}
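Inside buildTree() above, a scalar's external lane is remapped through ReuseShuffleIndices when the entry deduplicates repeated scalars. A standalone sketch of that remapping with hypothetical indices:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
  std::vector<int> ReuseShuffleIndices = {0, 1, 0, 1}; // 4 lanes reuse 2 scalars
  int Lane = 1;                                        // lane inside the entry
  int FoundLane = std::distance(
      ReuseShuffleIndices.begin(),
      std::find(ReuseShuffleIndices.begin(), ReuseShuffleIndices.end(), Lane));
  assert(FoundLane == 1); // first external position that reuses lane 1
  return 0;
}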
2178
2179void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
2180 const EdgeInfo &UserTreeIdx) {
2181 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
2182
2183 InstructionsState S = getSameOpcode(VL);
2184 if (Depth == RecursionMaxDepth) {
2185 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
2186 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2187 return;
2188 }
2189
2190 // Don't handle vectors.
2191 if (S.OpValue->getType()->isVectorTy()) {
2192 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
2193 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2194 return;
2195 }
2196
2197 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
2198 if (SI->getValueOperand()->getType()->isVectorTy()) {
2199 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
2200 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2201 return;
2202 }
2203
2204 // If all of the operands are identical or constant we have a simple solution.
2205 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
2206 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
2207 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2208 return;
2209 }
2210
2211 // We now know that this is a vector of instructions of the same type from
2212 // the same block.
2213
2214 // Don't vectorize ephemeral values.
2215 for (Value *V : VL) {
2216 if (EphValues.count(V)) {
2217 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2218 << ") is ephemeral.\n");
2219 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2220 return;
2221 }
2222 }
2223
2224 // Check if this is a duplicate of another entry.
2225 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
2226 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
2227 if (!E->isSame(VL)) {
2228 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
2229 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2230 return;
2231 }
2232 // Record the reuse of the tree node. FIXME, currently this is only used to
2233 // properly draw the graph rather than for the actual vectorization.
2234 E->UserTreeIndices.push_back(UserTreeIdx);
2235 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
2236 << ".\n");
2237 return;
2238 }
2239
2240 // Check that none of the instructions in the bundle are already in the tree.
2241 for (Value *V : VL) {
2242 auto *I = dyn_cast<Instruction>(V);
2243 if (!I)
2244 continue;
2245 if (getTreeEntry(I)) {
2246 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2247 << ") is already in tree.\n");
2248 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2249 return;
2250 }
2251 }
2252
2253 // If any of the scalars is marked as a value that needs to stay scalar, then
2254 // we need to gather the scalars.
2255 // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
2256 for (Value *V : VL) {
2257 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
2258 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
2259 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2260 return;
2261 }
2262 }
2263
2264 // Check that all of the users of the scalars that we want to vectorize are
2265 // schedulable.
2266 auto *VL0 = cast<Instruction>(S.OpValue);
2267 BasicBlock *BB = VL0->getParent();
2268
2269 if (!DT->isReachableFromEntry(BB)) {
2270 // Don't go into unreachable blocks. They may contain instructions with
2271 // dependency cycles which confuse the final scheduling.
2272 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
2273 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2274 return;
2275 }
2276
2277 // Check that every instruction appears once in this bundle.
2278 SmallVector<unsigned, 4> ReuseShuffleIndicies;
2279 SmallVector<Value *, 4> UniqueValues;
2280 DenseMap<Value *, unsigned> UniquePositions;
2281 for (Value *V : VL) {
2282 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
2283 ReuseShuffleIndicies.emplace_back(Res.first->second);
2284 if (Res.second)
2285 UniqueValues.emplace_back(V);
2286 }
2287 size_t NumUniqueScalarValues = UniqueValues.size();
2288 if (NumUniqueScalarValues == VL.size()) {
2289 ReuseShuffleIndicies.clear();
2290 } else {
2291 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
2292 if (NumUniqueScalarValues <= 1 ||
2293 !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
2294 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
2295 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2296 return;
2297 }
2298 VL = UniqueValues;
2299 }
2300
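The loop above assigns every scalar the index of its first occurrence, yielding ReuseShuffleIndicies and the list of unique values. A standalone sketch of that deduplication with plain containers and made-up scalars:

#include <cassert>
#include <map>
#include <vector>

int main() {
  std::vector<int> VL = {7, 9, 7, 9};       // bundle with repeated scalars
  std::vector<unsigned> ReuseIdx;
  std::vector<int> UniqueValues;
  std::map<int, unsigned> UniquePositions;
  for (int V : VL) {
    auto Res = UniquePositions.emplace(V, UniqueValues.size());
    ReuseIdx.push_back(Res.first->second);  // index of the first occurrence
    if (Res.second)
      UniqueValues.push_back(V);
  }
  assert(UniqueValues.size() == 2 && ReuseIdx == std::vector<unsigned>({0, 1, 0, 1}));
  return 0;
}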
2301 auto &BSRef = BlocksSchedules[BB];
2302 if (!BSRef)
2303 BSRef = std::make_unique<BlockScheduling>(BB);
2304
2305 BlockScheduling &BS = *BSRef.get();
2306
2307 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
2308 if (!Bundle) {
2309 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
2310 assert((!BS.getScheduleData(VL0) ||
2311 !BS.getScheduleData(VL0)->isPartOfBundle()) &&
2312 "tryScheduleBundle should cancelScheduling on failure");
2313 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2314 ReuseShuffleIndicies);
2315 return;
2316 }
2317 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
2318
2319 unsigned ShuffleOrOp = S.isAltShuffle() ?
2320 (unsigned) Instruction::ShuffleVector : S.getOpcode();
2321 switch (ShuffleOrOp) {
2322 case Instruction::PHI: {
2323 auto *PH = cast<PHINode>(VL0);
2324
2325 // Check for terminator values (e.g. invoke).
2326 for (unsigned j = 0; j < VL.size(); ++j)
2327 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2328 Instruction *Term = dyn_cast<Instruction>(
2329 cast<PHINode>(VL[j])->getIncomingValueForBlock(
2330 PH->getIncomingBlock(i)));
2331 if (Term && Term->isTerminator()) {
2332 LLVM_DEBUG(dbgs()
2333 << "SLP: Need to swizzle PHINodes (terminator use).\n");
2334 BS.cancelScheduling(VL, VL0);
2335 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2336 ReuseShuffleIndicies);
2337 return;
2338 }
2339 }
2340
2341 TreeEntry *TE =
2342 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
2343 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
2344
2345 // Keeps the reordered operands to avoid code duplication.
2346 SmallVector<ValueList, 2> OperandsVec;
2347 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2348 ValueList Operands;
2349 // Prepare the operand vector.
2350 for (Value *j : VL)
2351 Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
2352 PH->getIncomingBlock(i)));
2353 TE->setOperand(i, Operands);
2354 OperandsVec.push_back(Operands);
2355 }
2356 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
2357 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
2358 return;
2359 }
2360 case Instruction::ExtractValue:
2361 case Instruction::ExtractElement: {
2362 OrdersType CurrentOrder;
2363 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
2364 if (Reuse) {
2365 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
2366 ++NumOpsWantToKeepOriginalOrder;
2367 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2368 ReuseShuffleIndicies);
2369 // This is a special case, as it does not gather, but at the same time
2370 // we are not extending buildTree_rec() towards the operands.
2371 ValueList Op0;
2372 Op0.assign(VL.size(), VL0->getOperand(0));
2373 VectorizableTree.back()->setOperand(0, Op0);
2374 return;
2375 }
2376 if (!CurrentOrder.empty()) {
2377 LLVM_DEBUG({
2378 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
2379 "with order";
2380 for (unsigned Idx : CurrentOrder)
2381 dbgs() << " " << Idx;
2382 dbgs() << "\n";
2383 });
2384 // Insert new order with initial value 0, if it does not exist,
2385 // otherwise return the iterator to the existing one.
2386 auto StoredCurrentOrderAndNum =
2387 NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
2388 ++StoredCurrentOrderAndNum->getSecond();
2389 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2390 ReuseShuffleIndicies,
2391 StoredCurrentOrderAndNum->getFirst());
2392 // This is a special case, as it does not gather, but at the same time
2393 // we are not extending buildTree_rec() towards the operands.
2394 ValueList Op0;
2395 Op0.assign(VL.size(), VL0->getOperand(0));
2396 VectorizableTree.back()->setOperand(0, Op0);
2397 return;
2398 }
2399 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
2400 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2401 ReuseShuffleIndicies);
2402 BS.cancelScheduling(VL, VL0);
2403 return;
2404 }
2405 case Instruction::Load: {
2406 // Check that a vectorized load would load the same memory as a scalar
2407 // load. For example, we don't want to vectorize loads that are smaller
2408 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
2409 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
2410 // from such a struct, we read/write packed bits disagreeing with the
2411 // unvectorized version.
2412 Type *ScalarTy = VL0->getType();
2413
2414 if (DL->getTypeSizeInBits(ScalarTy) !=
2415 DL->getTypeAllocSizeInBits(ScalarTy)) {
2416 BS.cancelScheduling(VL, VL0);
2417 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2418 ReuseShuffleIndicies);
2419 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
2420 return;
2421 }
2422
2423 // Make sure all loads in the bundle are simple - we can't vectorize
2424 // atomic or volatile loads.
2425 SmallVector<Value *, 4> PointerOps(VL.size());
2426 auto POIter = PointerOps.begin();
2427 for (Value *V : VL) {
2428 auto *L = cast<LoadInst>(V);
2429 if (!L->isSimple()) {
2430 BS.cancelScheduling(VL, VL0);
2431 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2432 ReuseShuffleIndicies);
2433 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
2434 return;
2435 }
2436 *POIter = L->getPointerOperand();
2437 ++POIter;
2438 }
2439
2440 OrdersType CurrentOrder;
2441 // Check the order of pointer operands.
2442 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
2443 Value *Ptr0;
2444 Value *PtrN;
2445 if (CurrentOrder.empty()) {
2446 Ptr0 = PointerOps.front();
2447 PtrN = PointerOps.back();
2448 } else {
2449 Ptr0 = PointerOps[CurrentOrder.front()];
2450 PtrN = PointerOps[CurrentOrder.back()];
2451 }
2452 const SCEV *Scev0 = SE->getSCEV(Ptr0);
2453 const SCEV *ScevN = SE->getSCEV(PtrN);
2454 const auto *Diff =
2455 dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
2456 uint64_t Size = DL->getTypeAllocSize(ScalarTy);
2457 // Check that the sorted loads are consecutive.
2458 if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) {
2459 if (CurrentOrder.empty()) {
2460 // Original loads are consecutive and do not require reordering.
2461 ++NumOpsWantToKeepOriginalOrder;
2462 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
2463 UserTreeIdx, ReuseShuffleIndicies);
2464 TE->setOperandsInOrder();
2465 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
2466 } else {
2467 // Need to reorder.
2468 auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
2469 ++I->getSecond();
2470 TreeEntry *TE =
2471 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2472 ReuseShuffleIndicies, I->getFirst());
2473 TE->setOperandsInOrder();
2474 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
2475 }
2476 return;
2477 }
2478 }
2479
2480 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
2481 BS.cancelScheduling(VL, VL0);
2482 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2483 ReuseShuffleIndicies);
2484 return;
2485 }
2486 case Instruction::ZExt:
2487 case Instruction::SExt:
2488 case Instruction::FPToUI:
2489 case Instruction::FPToSI:
2490 case Instruction::FPExt:
2491 case Instruction::PtrToInt:
2492 case Instruction::IntToPtr:
2493 case Instruction::SIToFP:
2494 case Instruction::UIToFP:
2495 case Instruction::Trunc:
2496 case Instruction::FPTrunc:
2497 case Instruction::BitCast: {
2498 Type *SrcTy = VL0->getOperand(0)->getType();
2499 for (Value *V : VL) {
2500 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
2501 if (Ty != SrcTy || !isValidElementType(Ty)) {
2502 BS.cancelScheduling(VL, VL0);
2503 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2504 ReuseShuffleIndicies);
2505 LLVM_DEBUG(dbgs()
2506 << "SLP: Gathering casts with different src types.\n");
2507 return;
2508 }
2509 }
2510 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2511 ReuseShuffleIndicies);
2512 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
2513
2514 TE->setOperandsInOrder();
2515 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
2516 ValueList Operands;
2517 // Prepare the operand vector.
2518 for (Value *V : VL)
2519 Operands.push_back(cast<Instruction>(V)->getOperand(i));
2520
2521 buildTree_rec(Operands, Depth + 1, {TE, i});
2522 }
2523 return;
2524 }
2525 case Instruction::ICmp:
2526 case Instruction::FCmp: {
2527 // Check that all of the compares have the same predicate.
2528 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2529 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
2530 Type *ComparedTy = VL0->getOperand(0)->getType();
2531 for (Value *V : VL) {
2532 CmpInst *Cmp = cast<CmpInst>(V);
2533 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
2534 Cmp->getOperand(0)->getType() != ComparedTy) {
2535 BS.cancelScheduling(VL, VL0);
2536 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2537 ReuseShuffleIndicies);
2538 LLVM_DEBUG(dbgs()
2539 << "SLP: Gathering cmp with different predicate.\n");
2540 return;
2541 }
2542 }
2543
2544 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2545 ReuseShuffleIndicies);
2546 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
2547
2548 ValueList Left, Right;
2549 if (cast<CmpInst>(VL0)->isCommutative()) {
2550 // Commutative predicate - collect + sort operands of the instructions
2551 // so that each side is more likely to have the same opcode.
2552 assert(P0 == SwapP0 && "Commutative Predicate mismatch");
2553 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
2554 } else {
2555 // Collect operands - commute if it uses the swapped predicate.
2556 for (Value *V : VL) {
2557 auto *Cmp = cast<CmpInst>(V);
2558 Value *LHS = Cmp->getOperand(0);
2559 Value *RHS = Cmp->getOperand(1);
2560 if (Cmp->getPredicate() != P0)
2561 std::swap(LHS, RHS);
2562 Left.push_back(LHS);
2563 Right.push_back(RHS);
2564 }
2565 }
2566 TE->setOperand(0, Left);
2567 TE->setOperand(1, Right);
2568 buildTree_rec(Left, Depth + 1, {TE, 0});
2569 buildTree_rec(Right, Depth + 1, {TE, 1});
2570 return;
2571 }
2572 case Instruction::Select:
2573 case Instruction::FNeg:
2574 case Instruction::Add:
2575 case Instruction::FAdd:
2576 case Instruction::Sub:
2577 case Instruction::FSub:
2578 case Instruction::Mul:
2579 case Instruction::FMul:
2580 case Instruction::UDiv:
2581 case Instruction::SDiv:
2582 case Instruction::FDiv:
2583 case Instruction::URem:
2584 case Instruction::SRem:
2585 case Instruction::FRem:
2586 case Instruction::Shl:
2587 case Instruction::LShr:
2588 case Instruction::AShr:
2589 case Instruction::And:
2590 case Instruction::Or:
2591 case Instruction::Xor: {
2592 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2593 ReuseShuffleIndicies);
2594 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
2595
2596 // Sort operands of the instructions so that each side is more likely to
2597 // have the same opcode.
2598 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
2599 ValueList Left, Right;
2600 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
2601 TE->setOperand(0, Left);
2602 TE->setOperand(1, Right);
2603 buildTree_rec(Left, Depth + 1, {TE, 0});
2604 buildTree_rec(Right, Depth + 1, {TE, 1});
2605 return;
2606 }
2607
2608 TE->setOperandsInOrder();
2609 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
2610 ValueList Operands;
2611 // Prepare the operand vector.
2612 for (Value *j : VL)
2613 Operands.push_back(cast<Instruction>(j)->getOperand(i));
2614
2615 buildTree_rec(Operands, Depth + 1, {TE, i});
2616 }
2617 return;
2618 }
2619 case Instruction::GetElementPtr: {
2620 // We don't combine GEPs with complicated (nested) indexing.
2621 for (Value *V : VL) {
2622 if (cast<Instruction>(V)->getNumOperands() != 2) {
2623 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
2624 BS.cancelScheduling(VL, VL0);
2625 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2626 ReuseShuffleIndicies);
2627 return;
2628 }
2629 }
2630
2631 // We can't combine several GEPs into one vector if they operate on
2632 // different types.
2633 Type *Ty0 = VL0->getOperand(0)->getType();
2634 for (Value *V : VL) {
2635 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
2636 if (Ty0 != CurTy) {
2637 LLVM_DEBUG(dbgs()
2638 << "SLP: not-vectorizable GEP (different types).\n");
2639 BS.cancelScheduling(VL, VL0);
2640 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2641 ReuseShuffleIndicies);
2642 return;
2643 }
2644 }
2645
2646 // We don't combine GEPs with non-constant indexes.
2647 Type *Ty1 = VL0->getOperand(1)->getType();
2648 for (Value *V : VL) {
2649 auto Op = cast<Instruction>(V)->getOperand(1);
2650 if (!isa<ConstantInt>(Op) ||
2651 (Op->getType() != Ty1 &&
2652 Op->getType()->getScalarSizeInBits() >
2653 DL->getIndexSizeInBits(
2654 V->getType()->getPointerAddressSpace()))) {
2655 LLVM_DEBUG(dbgs()
2656 << "SLP: not-vectorizable GEP (non-constant indexes).\n");
2657 BS.cancelScheduling(VL, VL0);
2658 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2659 ReuseShuffleIndicies);
2660 return;
2661 }
2662 }
2663
2664 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2665 ReuseShuffleIndicies);
2666 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
2667 TE->setOperandsInOrder();
2668 for (unsigned i = 0, e = 2; i < e; ++i) {
2669 ValueList Operands;
2670 // Prepare the operand vector.
2671 for (Value *V : VL)
2672 Operands.push_back(cast<Instruction>(V)->getOperand(i));
2673
2674 buildTree_rec(Operands, Depth + 1, {TE, i});
2675 }
2676 return;
2677 }
2678 case Instruction::Store: {
2679 // Check if the stores are consecutive or if we need to swizzle them.
2680 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
2681 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
2682 BS.cancelScheduling(VL, VL0);
2683 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2684 ReuseShuffleIndicies);
2685 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
2686 return;
2687 }
2688
2689 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2690 ReuseShuffleIndicies);
2691 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
2692
2693 ValueList Operands;
2694 for (Value *V : VL)
2695 Operands.push_back(cast<Instruction>(V)->getOperand(0));
2696 TE->setOperandsInOrder();
2697 buildTree_rec(Operands, Depth + 1, {TE, 0});
2698 return;
2699 }
2700 case Instruction::Call: {
2701 // Check if the calls are all to the same vectorizable intrinsic.
2702 CallInst *CI = cast<CallInst>(VL0);
2703 // Check if this is an Intrinsic call or something that can be
2704 // represented by an intrinsic call
2705 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2706 if (!isTriviallyVectorizable(ID)) {
2707 BS.cancelScheduling(VL, VL0);
2708 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2709 ReuseShuffleIndicies);
2710 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
2711 return;
2712 }
2713 Function *Int = CI->getCalledFunction();
2714 unsigned NumArgs = CI->getNumArgOperands();
2715 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
2716 for (unsigned j = 0; j != NumArgs; ++j)
2717 if (hasVectorInstrinsicScalarOpd(ID, j))
2718 ScalarArgs[j] = CI->getArgOperand(j);
2719 for (Value *V : VL) {
2720 CallInst *CI2 = dyn_cast<CallInst>(V);
2721 if (!CI2 || CI2->getCalledFunction() != Int ||
2722 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
2723 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
2724 BS.cancelScheduling(VL, VL0);
2725 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2726 ReuseShuffleIndicies);
2727 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
2728 << "\n");
2729 return;
2730 }
2731 // Some intrinsics have scalar arguments, and these must be the same
2732 // across all calls for the bundle to be vectorized.
2733 for (unsigned j = 0; j != NumArgs; ++j) {
2734 if (hasVectorInstrinsicScalarOpd(ID, j)) {
2735 Value *A1J = CI2->getArgOperand(j);
2736 if (ScalarArgs[j] != A1J) {
2737 BS.cancelScheduling(VL, VL0);
2738 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2739 ReuseShuffleIndicies);
2740 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
2741 << " argument " << ScalarArgs[j] << "!=" << A1J
2742 << "\n");
2743 return;
2744 }
2745 }
2746 }
2747 // Verify that the bundle operands are identical between the two calls.
2748 if (CI->hasOperandBundles() &&
2749 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
2750 CI->op_begin() + CI->getBundleOperandsEndIndex(),
2751 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
2752 BS.cancelScheduling(VL, VL0);
2753 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2754 ReuseShuffleIndicies);
2755 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
2756 << *CI << "!=" << *V << '\n');
2757 return;
2758 }
2759 }
2760
2761 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2762 ReuseShuffleIndicies);
2763 TE->setOperandsInOrder();
2764 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
2765 ValueList Operands;
2766 // Prepare the operand vector.
2767 for (Value *V : VL) {
2768 auto *CI2 = cast<CallInst>(V);
2769 Operands.push_back(CI2->getArgOperand(i));
2770 }
2771 buildTree_rec(Operands, Depth + 1, {TE, i});
2772 }
2773 return;
2774 }
2775 case Instruction::ShuffleVector: {
2776 // If this is not an alternating sequence of opcodes (e.g., add-sub),
2777 // then do not vectorize this instruction.
2778 if (!S.isAltShuffle()) {
2779 BS.cancelScheduling(VL, VL0);
2780 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2781 ReuseShuffleIndicies);
2782 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
2783 return;
2784 }
2785 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2786 ReuseShuffleIndicies);
2787 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
2788
2789 // Reorder operands if reordering would enable vectorization.
2790 if (isa<BinaryOperator>(VL0)) {
2791 ValueList Left, Right;
2792 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
2793 TE->setOperand(0, Left);
2794 TE->setOperand(1, Right);
2795 buildTree_rec(Left, Depth + 1, {TE, 0});
2796 buildTree_rec(Right, Depth + 1, {TE, 1});
2797 return;
2798 }
2799
2800 TE->setOperandsInOrder();
2801 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
2802 ValueList Operands;
2803 // Prepare the operand vector.
2804 for (Value *V : VL)
2805 Operands.push_back(cast<Instruction>(V)->getOperand(i));
2806
2807 buildTree_rec(Operands, Depth + 1, {TE, i});
2808 }
2809 return;
2810 }
2811 default:
2812 BS.cancelScheduling(VL, VL0);
2813 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2814 ReuseShuffleIndicies);
2815 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
2816 return;
2817 }
2818}
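
// A minimal standalone sketch (editorial illustration, not part of
// SLPVectorizer.cpp) of the consecutive-load test used in the
// Instruction::Load case of buildTree_rec above. In the real code the byte
// offsets come from SCEV (SE->getMinusSCEV of the first and last sorted
// pointers); here they are assumed to be already-known byte offsets, sorted
// in increasing order.
#include <cstdint>
#include <vector>

static bool loadsLookConsecutive(const std::vector<int64_t> &SortedByteOffsets,
                                 uint64_t ElemSizeInBytes) {
  if (SortedByteOffsets.size() < 2)
    return true;
  int64_t Span = SortedByteOffsets.back() - SortedByteOffsets.front();
  // Example: four i32 loads at offsets {0, 4, 8, 12} give Span == 12 ==
  // (4 - 1) * 4, so the bundle could be emitted as one wide load.
  return Span ==
         static_cast<int64_t>((SortedByteOffsets.size() - 1) * ElemSizeInBytes);
}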
2819
2820unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
2821 unsigned N;
2822 Type *EltTy;
2823 auto *ST = dyn_cast<StructType>(T);
2824 if (ST) {
2825 N = ST->getNumElements();
2826 EltTy = *ST->element_begin();
2827 } else {
2828 N = cast<ArrayType>(T)->getNumElements();
2829 EltTy = cast<ArrayType>(T)->getElementType();
2830 }
2831 if (!isValidElementType(EltTy))
2832 return 0;
2833 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
2834 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
2835 return 0;
2836 if (ST) {
2837 // Check that struct is homogeneous.
2838 for (const auto *Ty : ST->elements())
2839 if (Ty != EltTy)
2840 return 0;
2841 }
2842 return N;
2843}
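
// A simplified standalone sketch (stated assumptions, not the LLVM
// implementation) of the size/homogeneity test in canMapToVector above: an
// aggregate maps to a vector only if every element has the same type and the
// resulting vector fits the register-size bounds. EltBits and the bounds are
// example parameters.
#include <cstdint>

static unsigned canMapToVectorSketch(uint64_t EltBits, unsigned NumElts,
                                     bool HomogeneousElements,
                                     uint64_t MinVecRegSize,
                                     uint64_t MaxVecRegSize) {
  uint64_t VTSize = EltBits * NumElts;
  if (!HomogeneousElements || VTSize < MinVecRegSize || VTSize > MaxVecRegSize)
    return 0; // cannot be treated as a vector
  // Example: a struct of four floats with 128-bit vector registers ->
  // 4 * 32 == 128 bits, so the sketch returns 4.
  return NumElts;
}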
2844
2845bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2846 SmallVectorImpl<unsigned> &CurrentOrder) const {
2847 Instruction *E0 = cast<Instruction>(OpValue);
2848 assert(E0->getOpcode() == Instruction::ExtractElement ||
2849 E0->getOpcode() == Instruction::ExtractValue);
2850 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
2851 // Check if all of the extracts come from the same vector and from the
2852 // correct offset.
2853 Value *Vec = E0->getOperand(0);
2854
2855 CurrentOrder.clear();
2856
2857 // We have to extract from a vector/aggregate with the same number of elements.
2858 unsigned NElts;
2859 if (E0->getOpcode() == Instruction::ExtractValue) {
2860 const DataLayout &DL = E0->getModule()->getDataLayout();
2861 NElts = canMapToVector(Vec->getType(), DL);
2862 if (!NElts)
2863 return false;
2864 // Check if load can be rewritten as load of vector.
2865 LoadInst *LI = dyn_cast<LoadInst>(Vec);
2866 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
2867 return false;
2868 } else {
2869 NElts = Vec->getType()->getVectorNumElements();
2870 }
2871
2872 if (NElts != VL.size())
2873 return false;
2874
2875 // Check that all of the indices extract from the correct offset.
2876 bool ShouldKeepOrder = true;
2877 unsigned E = VL.size();
2878 // Assign to all items the initial value E + 1 so we can check if the extract
2879 // instruction index was used already.
2880 // Also, later we can check that all the indices are used and we have a
2881 // consecutive access in the extract instructions, by checking that no
2882 // element of CurrentOrder still has value E + 1.
2883 CurrentOrder.assign(E, E + 1);
2884 unsigned I = 0;
2885 for (; I < E; ++I) {
2886 auto *Inst = cast<Instruction>(VL[I]);
2887 if (Inst->getOperand(0) != Vec)
2888 break;
2889 Optional<unsigned> Idx = getExtractIndex(Inst);
2890 if (!Idx)
2891 break;
2892 const unsigned ExtIdx = *Idx;
2893 if (ExtIdx != I) {
2894 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
2895 break;
2896 ShouldKeepOrder = false;
2897 CurrentOrder[ExtIdx] = I;
2898 } else {
2899 if (CurrentOrder[I] != E + 1)
2900 break;
2901 CurrentOrder[I] = I;
2902 }
2903 }
2904 if (I < E) {
2905 CurrentOrder.clear();
2906 return false;
2907 }
2908
2909 return ShouldKeepOrder;
2910}
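
// A standalone sketch (a simplification, not the code above) of the index
// bookkeeping in canReuseExtract: E + 1 marks an unused slot, the extract
// indices must form a permutation of 0..E-1, and the original order is kept
// only when every extract index already equals its position in the bundle.
#include <vector>

static bool buildExtractOrderSketch(const std::vector<unsigned> &ExtractIndices,
                                    std::vector<unsigned> &CurrentOrder,
                                    bool &ShouldKeepOrder) {
  unsigned E = ExtractIndices.size();
  CurrentOrder.assign(E, E + 1);
  ShouldKeepOrder = true;
  for (unsigned I = 0; I < E; ++I) {
    unsigned Idx = ExtractIndices[I];
    if (Idx >= E || CurrentOrder[Idx] != E + 1)
      return false;            // out of range or the slot was used twice
    if (Idx != I)
      ShouldKeepOrder = false; // reusing the source vector needs a shuffle
    CurrentOrder[Idx] = I;
  }
  // Example: indices {1, 0, 3, 2} yield CurrentOrder {1, 0, 3, 2} and
  // ShouldKeepOrder == false; indices {0, 1, 2, 3} keep the original order.
  return true;
}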
2911
2912bool BoUpSLP::areAllUsersVectorized(Instruction *I) const {
2913 return I->hasOneUse() ||
2914 std::all_of(I->user_begin(), I->user_end(), [this](User *U) {
2915 return ScalarToTreeEntry.count(U) > 0;
2916 });
2917}
2918
2919int BoUpSLP::getEntryCost(TreeEntry *E) {
2920 ArrayRef<Value*> VL = E->Scalars;
2921
2922 Type *ScalarTy = VL[0]->getType();
2923 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2924 ScalarTy = SI->getValueOperand()->getType();
2925 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
2926 ScalarTy = CI->getOperand(0)->getType();
2927 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2928
2929 // If we have computed a smaller type for the expression, update VecTy so
2930 // that the costs will be accurate.
2931 if (MinBWs.count(VL[0]))
2932 VecTy = VectorType::get(
2933 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
2934
2935 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
2936 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
2937 int ReuseShuffleCost = 0;
2938 if (NeedToShuffleReuses) {
2939 ReuseShuffleCost =
2940 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
2941 }
2942 if (E->NeedToGather) {
2943 if (allConstant(VL))
2944 return 0;
2945 if (isSplat(VL)) {
2946 return ReuseShuffleCost +
2947 TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
2948 }
2949 if (E->getOpcode() == Instruction::ExtractElement &&
2950 allSameType(VL) && allSameBlock(VL)) {
2951 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL);
2952 if (ShuffleKind.hasValue()) {
2953 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy);
2954 for (auto *V : VL) {
2955 // If all users of the instruction are going to be vectorized and this
2956 // instruction itself is not going to be vectorized, consider this
2957 // instruction as dead and remove its cost from the final cost of the
2958 // vectorized tree.
2959 if (areAllUsersVectorized(cast<Instruction>(V)) &&
2960 !ScalarToTreeEntry.count(V)) {
2961 auto *IO = cast<ConstantInt>(
2962 cast<ExtractElementInst>(V)->getIndexOperand());
2963 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
2964 IO->getZExtValue());
2965 }
2966 }
2967 return ReuseShuffleCost + Cost;
2968 }
2969 }
2970 return ReuseShuffleCost + getGatherCost(VL);
2971 }
2972 assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
2973 Instruction *VL0 = E->getMainOp();
2974 unsigned ShuffleOrOp =
2975 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
2976 switch (ShuffleOrOp) {
2977 case Instruction::PHI:
2978 return 0;
2979
2980 case Instruction::ExtractValue:
2981 case Instruction::ExtractElement:
2982 if (NeedToShuffleReuses) {
2983 unsigned Idx = 0;
2984 for (unsigned I : E->ReuseShuffleIndices) {
2985 if (ShuffleOrOp == Instruction::ExtractElement) {
2986 auto *IO = cast<ConstantInt>(
2987 cast<ExtractElementInst>(VL[I])->getIndexOperand());
2988 Idx = IO->getZExtValue();
2989 ReuseShuffleCost -= TTI->getVectorInstrCost(
2990 Instruction::ExtractElement, VecTy, Idx);
2991 } else {
2992 ReuseShuffleCost -= TTI->getVectorInstrCost(
2993 Instruction::ExtractElement, VecTy, Idx);
2994 ++Idx;
2995 }
2996 }
2997 Idx = ReuseShuffleNumbers;
2998 for (Value *V : VL) {
2999 if (ShuffleOrOp == Instruction::ExtractElement) {
3000 auto *IO = cast<ConstantInt>(
3001 cast<ExtractElementInst>(V)->getIndexOperand());
3002 Idx = IO->getZExtValue();
3003 } else {
3004 --Idx;
3005 }
3006 ReuseShuffleCost +=
3007 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx);
3008 }
3009 }
3010 if (!E->NeedToGather) {
3011 int DeadCost = ReuseShuffleCost;
3012 if (!E->ReorderIndices.empty()) {
3013 // TODO: Merge this shuffle with the ReuseShuffleCost.
3014 DeadCost += TTI->getShuffleCost(
3015 TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
3016 }
3017 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
3018 Instruction *E = cast<Instruction>(VL[i]);
3019 // If all users are going to be vectorized, the instruction can be
3020 // considered dead.
3021 // Likewise, if it has only one user, it will definitely be vectorized.
3022 if (areAllUsersVectorized(E)) {
3023 // Take credit for instruction that will become dead.
3024 if (E->hasOneUse()) {
3025 Instruction *Ext = E->user_back();
3026 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3027 all_of(Ext->users(),
3028 [](User *U) { return isa<GetElementPtrInst>(U); })) {
3029 // Use getExtractWithExtendCost() to calculate the cost of
3030 // extractelement/ext pair.
3031 DeadCost -= TTI->getExtractWithExtendCost(
3032 Ext->getOpcode(), Ext->getType(), VecTy, i);
3033 // Add back the cost of s|zext which is subtracted separately.
3034 DeadCost += TTI->getCastInstrCost(
3035 Ext->getOpcode(), Ext->getType(), E->getType(), Ext);
3036 continue;
3037 }
3038 }
3039 DeadCost -=
3040 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
3041 }
3042 }
3043 return DeadCost;
3044 }
3045 return ReuseShuffleCost + getGatherCost(VL);
3046
3047 case Instruction::ZExt:
3048 case Instruction::SExt:
3049 case Instruction::FPToUI:
3050 case Instruction::FPToSI:
3051 case Instruction::FPExt:
3052 case Instruction::PtrToInt:
3053 case Instruction::IntToPtr:
3054 case Instruction::SIToFP:
3055 case Instruction::UIToFP:
3056 case Instruction::Trunc:
3057 case Instruction::FPTrunc:
3058 case Instruction::BitCast: {
3059 Type *SrcTy = VL0->getOperand(0)->getType();
3060 int ScalarEltCost =
3061 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, VL0);
3062 if (NeedToShuffleReuses) {
3063 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3064 }
3065
3066 // Calculate the cost of this instruction.
3067 int ScalarCost = VL.size() * ScalarEltCost;
3068
3069 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
3070 int VecCost = 0;
3071 // Check if the values are candidates to demote.
3072 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
3073 VecCost = ReuseShuffleCost +
3074 TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy, VL0);
3075 }
3076 return VecCost - ScalarCost;
3077 }
3078 case Instruction::FCmp:
3079 case Instruction::ICmp:
3080 case Instruction::Select: {
3081 // Calculate the cost of this instruction.
3082 int ScalarEltCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
3083 Builder.getInt1Ty(), VL0);
3084 if (NeedToShuffleReuses) {
3085 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3086 }
3087 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
3088 int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3089 int VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, VL0);
3090 return ReuseShuffleCost + VecCost - ScalarCost;
3091 }
3092 case Instruction::FNeg:
3093 case Instruction::Add:
3094 case Instruction::FAdd:
3095 case Instruction::Sub:
3096 case Instruction::FSub:
3097 case Instruction::Mul:
3098 case Instruction::FMul:
3099 case Instruction::UDiv:
3100 case Instruction::SDiv:
3101 case Instruction::FDiv:
3102 case Instruction::URem:
3103 case Instruction::SRem:
3104 case Instruction::FRem:
3105 case Instruction::Shl:
3106 case Instruction::LShr:
3107 case Instruction::AShr:
3108 case Instruction::And:
3109 case Instruction::Or:
3110 case Instruction::Xor: {
3111 // Certain instructions can be cheaper to vectorize if they have a
3112 // constant second vector operand.
3113 TargetTransformInfo::OperandValueKind Op1VK =
3114 TargetTransformInfo::OK_AnyValue;
3115 TargetTransformInfo::OperandValueKind Op2VK =
3116 TargetTransformInfo::OK_UniformConstantValue;
3117 TargetTransformInfo::OperandValueProperties Op1VP =
3118 TargetTransformInfo::OP_None;
3119 TargetTransformInfo::OperandValueProperties Op2VP =
3120 TargetTransformInfo::OP_PowerOf2;
3121
3122 // If all operands are exactly the same ConstantInt then set the
3123 // operand kind to OK_UniformConstantValue.
3124 // If instead not all operands are constants, then set the operand kind
3125 // to OK_AnyValue. If all operands are constants but not the same,
3126 // then set the operand kind to OK_NonUniformConstantValue.
3127 ConstantInt *CInt0 = nullptr;
3128 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
3129 const Instruction *I = cast<Instruction>(VL[i]);
3130 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
3131 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
3132 if (!CInt) {
3133 Op2VK = TargetTransformInfo::OK_AnyValue;
3134 Op2VP = TargetTransformInfo::OP_None;
3135 break;
3136 }
3137 if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
3138 !CInt->getValue().isPowerOf2())
3139 Op2VP = TargetTransformInfo::OP_None;
3140 if (i == 0) {
3141 CInt0 = CInt;
3142 continue;
3143 }
3144 if (CInt0 != CInt)
3145 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
3146 }
3147
3148 SmallVector<const Value *, 4> Operands(VL0->operand_values());
3149 int ScalarEltCost = TTI->getArithmeticInstrCost(
3150 E->getOpcode(), ScalarTy, Op1VK, Op2VK, Op1VP, Op2VP, Operands);
3151 if (NeedToShuffleReuses) {
3152 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3153 }
3154 int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3155 int VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, Op1VK,
3156 Op2VK, Op1VP, Op2VP, Operands);
3157 return ReuseShuffleCost + VecCost - ScalarCost;
3158 }
3159 case Instruction::GetElementPtr: {
3160 TargetTransformInfo::OperandValueKind Op1VK =
3161 TargetTransformInfo::OK_AnyValue;
3162 TargetTransformInfo::OperandValueKind Op2VK =
3163 TargetTransformInfo::OK_UniformConstantValue;
3164
3165 int ScalarEltCost =
3166 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
3167 if (NeedToShuffleReuses) {
3168 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3169 }
3170 int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3171 int VecCost =
3172 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
3173 return ReuseShuffleCost + VecCost - ScalarCost;
3174 }
3175 case Instruction::Load: {
3176 // Cost of wide load - cost of scalar loads.
3177 MaybeAlign alignment(cast<LoadInst>(VL0)->getAlignment());
3178 int ScalarEltCost =
3179 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
3180 if (NeedToShuffleReuses) {
3181 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3182 }
3183 int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
3184 int VecLdCost =
3185 TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, VL0);
3186 if (!E->ReorderIndices.empty()) {
3187 // TODO: Merge this shuffle with the ReuseShuffleCost.
3188 VecLdCost += TTI->getShuffleCost(
3189 TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
3190 }
3191 return ReuseShuffleCost + VecLdCost - ScalarLdCost;
3192 }
3193 case Instruction::Store: {
3194 // We know that we can merge the stores. Calculate the cost.
3195 MaybeAlign alignment(cast<StoreInst>(VL0)->getAlignment());
3196 int ScalarEltCost =
3197 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
3198 if (NeedToShuffleReuses) {
3199 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3200 }
3201 int ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
3202 int VecStCost =
3203 TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0);
3204 return ReuseShuffleCost + VecStCost - ScalarStCost;
3205 }
3206 case Instruction::Call: {
3207 CallInst *CI = cast<CallInst>(VL0);
3208 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3209
3210 // Calculate the cost of the scalar and vector calls.
3211 SmallVector<Type *, 4> ScalarTys;
3212 for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
3213 ScalarTys.push_back(CI->getArgOperand(op)->getType());
3214
3215 FastMathFlags FMF;
3216 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3217 FMF = FPMO->getFastMathFlags();
3218
3219 int ScalarEltCost =
3220 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
3221 if (NeedToShuffleReuses) {
3222 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3223 }
3224 int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
3225
3226 SmallVector<Value *, 4> Args(CI->arg_operands());
3227 int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
3228 VecTy->getNumElements());
3229
3230 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
3231 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
3232 << " for " << *CI << "\n");
3233
3234 return ReuseShuffleCost + VecCallCost - ScalarCallCost;
3235 }
3236 case Instruction::ShuffleVector: {
3237 assert(E->isAltShuffle() &&
3238 ((Instruction::isBinaryOp(E->getOpcode()) &&
3239 Instruction::isBinaryOp(E->getAltOpcode())) ||
3240 (Instruction::isCast(E->getOpcode()) &&
3241 Instruction::isCast(E->getAltOpcode()))) &&
3242 "Invalid Shuffle Vector Operand");
3243 int ScalarCost = 0;
3244 if (NeedToShuffleReuses) {
3245 for (unsigned Idx : E->ReuseShuffleIndices) {
3246 Instruction *I = cast<Instruction>(VL[Idx]);
3247 ReuseShuffleCost -= TTI->getInstructionCost(
3248 I, TargetTransformInfo::TCK_RecipThroughput);
3249 }
3250 for (Value *V : VL) {
3251 Instruction *I = cast<Instruction>(V);
3252 ReuseShuffleCost += TTI->getInstructionCost(
3253 I, TargetTransformInfo::TCK_RecipThroughput);
3254 }
3255 }
3256 for (Value *V : VL) {
3257 Instruction *I = cast<Instruction>(V);
3258 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
3259 ScalarCost += TTI->getInstructionCost(
3260 I, TargetTransformInfo::TCK_RecipThroughput);
3261 }
3262 // VecCost is equal to the sum of the cost of creating the two vectors
3263 // and the cost of creating the shuffle.
3264 int VecCost = 0;
3265 if (Instruction::isBinaryOp(E->getOpcode())) {
3266 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy);
3267 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy);
3268 } else {
3269 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
3270 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
3271 VectorType *Src0Ty = VectorType::get(Src0SclTy, VL.size());
3272 VectorType *Src1Ty = VectorType::get(Src1SclTy, VL.size());
3273 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty);
3274 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty);
3275 }
3276 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0);
3277 return ReuseShuffleCost + VecCost - ScalarCost;
3278 }
3279 default:
3280 llvm_unreachable("Unknown instruction")::llvm::llvm_unreachable_internal("Unknown instruction", "/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp"
, 3280)
;
3281 }
3282}
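
// Purely illustrative arithmetic in the spirit of getEntryCost above; the
// per-operation costs are invented example numbers, not values produced by
// any TargetTransformInfo implementation.
static int entryCostSketch() {
  int ScalarEltCost = 1;              // assumed cost of one scalar add
  int ScalarCost = 4 * ScalarEltCost; // a bundle of four scalar adds -> 4
  int VecCost = 1;                    // assumed cost of one 4-wide vector add
  // A negative result means the vector form is estimated to be cheaper.
  return VecCost - ScalarCost;        // 1 - 4 = -3
}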
3283
3284bool BoUpSLP::isFullyVectorizableTinyTree() const {
3285 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
3286 << VectorizableTree.size() << " is fully vectorizable .\n");
3287
3288 // We only handle trees of heights 1 and 2.
3289 if (VectorizableTree.size() == 1 && !VectorizableTree[0]->NeedToGather)
3290 return true;
3291
3292 if (VectorizableTree.size() != 2)
3293 return false;
3294
3295 // Handle splat and all-constants stores.
3296 if (!VectorizableTree[0]->NeedToGather &&
3297 (allConstant(VectorizableTree[1]->Scalars) ||
3298 isSplat(VectorizableTree[1]->Scalars)))
3299 return true;
3300
3301 // Gathering cost would be too much for tiny trees.
3302 if (VectorizableTree[0]->NeedToGather || VectorizableTree[1]->NeedToGather)
3303 return false;
3304
3305 return true;
3306}
3307
3308bool BoUpSLP::isLoadCombineReductionCandidate(unsigned RdxOpcode) const {
3309 if (RdxOpcode != Instruction::Or)
3310 return false;
3311
3312 unsigned NumElts = VectorizableTree[0]->Scalars.size();
3313 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
3314
3315 // Look past the reduction to find a source value. Arbitrarily follow the
3316 // path through operand 0 of any 'or'. Also, peek through optional
3317 // shift-left-by-constant.
3318 Value *ZextLoad = FirstReduced;
3319 while (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
3320 match(ZextLoad, m_Shl(m_Value(), m_Constant())))
3321 ZextLoad = cast<BinaryOperator>(ZextLoad)->getOperand(0);
3322
3323 // Check if the input to the reduction is an extended load.
3324 Value *LoadPtr;
3325 if (!match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
3326 return false;
3327
3328 // Require that the total load bit width is a legal integer type.
3329 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
3330 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
3331 Type *SrcTy = LoadPtr->getType()->getPointerElementType();
3332 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
3333 LLVMContext &Context = FirstReduced->getContext();
3334 if (!TTI->isTypeLegal(IntegerType::get(Context, LoadBitWidth)))
3335 return false;
3336
3337 // Everything matched - assume that we can fold the whole sequence using
3338 // load combining.
3339 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for scalar reduction of "
3340 << *(cast<Instruction>(FirstReduced)) << "\n");
3341
3342 return true;
3343}
3344
3345bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
3346 // We can vectorize the tree if its size is greater than or equal to the
3347 // minimum size specified by the MinTreeSize command line option.
3348 if (VectorizableTree.size() >= MinTreeSize)
3349 return false;
3350
3351 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
3352 // can vectorize it if we can prove it fully vectorizable.
3353 if (isFullyVectorizableTinyTree())
3354 return false;
3355
3356 assert(VectorizableTree.empty()
3357 ? ExternalUses.empty()
3358 : true && "We shouldn't have any external users");
3359
3360 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
3361 // vectorizable.
3362 return true;
3363}
3364
3365int BoUpSLP::getSpillCost() const {
3366 // Walk from the bottom of the tree to the top, tracking which values are
3367 // live. When we see a call instruction that is not part of our tree,
3368 // query TTI to see if there is a cost to keeping values live over it
3369 // (for example, if spills and fills are required).
3370 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
3371 int Cost = 0;
3372
3373 SmallPtrSet<Instruction*, 4> LiveValues;
3374 Instruction *PrevInst = nullptr;
3375
3376 for (const auto &TEPtr : VectorizableTree) {
3377 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
3378 if (!Inst)
3379 continue;
3380
3381 if (!PrevInst) {
3382 PrevInst = Inst;
3383 continue;
3384 }
3385
3386 // Update LiveValues.
3387 LiveValues.erase(PrevInst);
3388 for (auto &J : PrevInst->operands()) {
3389 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
3390 LiveValues.insert(cast<Instruction>(&*J));
3391 }
3392
3393 LLVM_DEBUG({
3394 dbgs() << "SLP: #LV: " << LiveValues.size();
3395 for (auto *X : LiveValues)
3396 dbgs() << " " << X->getName();
3397 dbgs() << ", Looking at ";
3398 Inst->dump();
3399 });
3400
3401 // Now find the sequence of instructions between PrevInst and Inst.
3402 unsigned NumCalls = 0;
3403 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
3404 PrevInstIt =
3405 PrevInst->getIterator().getReverse();
3406 while (InstIt != PrevInstIt) {
3407 if (PrevInstIt == PrevInst->getParent()->rend()) {
3408 PrevInstIt = Inst->getParent()->rbegin();
3409 continue;
3410 }
3411
3412 // Debug information does not impact spill cost.
3413 if ((isa<CallInst>(&*PrevInstIt) &&
3414 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
3415 &*PrevInstIt != PrevInst)
3416 NumCalls++;
3417
3418 ++PrevInstIt;
3419 }
3420
3421 if (NumCalls) {
3422 SmallVector<Type*, 4> V;
3423 for (auto *II : LiveValues)
3424 V.push_back(VectorType::get(II->getType(), BundleWidth));
3425 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
3426 }
3427
3428 PrevInst = Inst;
3429 }
3430
3431 return Cost;
3432}
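
// A simplified sketch (an assumption-laden reading, not the implementation)
// of the accounting in getSpillCost above: each call found between two
// adjacent vectorized instructions is charged the target's cost of keeping
// the currently live bundles alive across a call.
static int spillCostSketch(int NumCallsBetween, int CostOfKeepingLiveOverCall) {
  // Example: 2 intervening calls with an assumed per-call cost of 3 -> 6.
  return NumCallsBetween * CostOfKeepingLiveOverCall;
}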
3433
3434int BoUpSLP::getTreeCost() {
3435 int Cost = 0;
3436 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
3437 << VectorizableTree.size() << ".\n");
3438
3439 unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
3440
3441 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
3442 TreeEntry &TE = *VectorizableTree[I].get();
3443
3444 // We create duplicate tree entries for gather sequences that have multiple
3445 // uses. However, we should not compute the cost of duplicate sequences.
3446 // For example, if we have a build vector (i.e., insertelement sequence)
3447 // that is used by more than one vector instruction, we only need to
3448 // compute the cost of the insertelement instructions once. The redundant
3449 // instructions will be eliminated by CSE.
3450 //
3451 // We should consider not creating duplicate tree entries for gather
3452 // sequences, and instead add additional edges to the tree representing
3453 // their uses. Since such an approach results in fewer total entries,
3454 // existing heuristics based on tree size may yield different results.
3455 //
3456 if (TE.NeedToGather &&
3457 std::any_of(
3458 std::next(VectorizableTree.begin(), I + 1), VectorizableTree.end(),
3459 [TE](const std::unique_ptr<TreeEntry> &EntryPtr) {
3460 return EntryPtr->NeedToGather && EntryPtr->isSame(TE.Scalars);
3461 }))
3462 continue;
3463
3464 int C = getEntryCost(&TE);
3465 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
3466 << " for bundle that starts with " << *TE.Scalars[0]
3467 << ".\n");
3468 Cost += C;
3469 }
3470
3471 SmallPtrSet<Value *, 16> ExtractCostCalculated;
3472 int ExtractCost = 0;
3473 for (ExternalUser &EU : ExternalUses) {
3474 // We only add extract cost once for the same scalar.
3475 if (!ExtractCostCalculated.insert(EU.Scalar).second)
3476 continue;
3477
3478 // Uses by ephemeral values are free (because the ephemeral value will be
3479 // removed prior to code generation, and so the extraction will be
3480 // removed as well).
3481 if (EphValues.count(EU.User))
3482 continue;
3483
3484 // If we plan to rewrite the tree in a smaller type, we will need to sign
3485 // extend the extracted value back to the original type. Here, we account
3486 // for the extract and the added cost of the sign extend if needed.
3487 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
3488 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
3489 if (MinBWs.count(ScalarRoot)) {
3490 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
3491 auto Extend =
3492 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
3493 VecTy = VectorType::get(MinTy, BundleWidth);
3494 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
3495 VecTy, EU.Lane);
3496 } else {
3497 ExtractCost +=
3498 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
3499 }
3500 }
3501
3502 int SpillCost = getSpillCost();
3503 Cost += SpillCost + ExtractCost;
3504
3505 std::string Str;
3506 {
3507 raw_string_ostream OS(Str);
3508 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
3509 << "SLP: Extract Cost = " << ExtractCost << ".\n"
3510 << "SLP: Total Cost = " << Cost << ".\n";
3511 }
3512 LLVM_DEBUG(dbgs() << Str);
3513
3514 if (ViewSLPTree)
3515 ViewGraph(this, "SLP" + F->getName(), false, Str);
3516
3517 return Cost;
3518}
3519
3520int BoUpSLP::getGatherCost(Type *Ty,
3521 const DenseSet<unsigned> &ShuffledIndices) const {
3522 int Cost = 0;
3523 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
3524 if (!ShuffledIndices.count(i))
3525 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
3526 if (!ShuffledIndices.empty())
3527 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
3528 return Cost;
3529}
3530
3531int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
3532 // Find the type of the operands in VL.
3533 Type *ScalarTy = VL[0]->getType();
3534 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
3535 ScalarTy = SI->getValueOperand()->getType();
3536 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
3537 // Find the cost of inserting/extracting values from the vector.
3538 // Check if the same elements are inserted several times and count them as
3539 // shuffle candidates.
3540 DenseSet<unsigned> ShuffledElements;
3541 DenseSet<Value *> UniqueElements;
3542 // Iterate in reverse order to consider the insert elements with the higher cost first.
3543 for (unsigned I = VL.size(); I > 0; --I) {
3544 unsigned Idx = I - 1;
3545 if (!UniqueElements.insert(VL[Idx]).second)
3546 ShuffledElements.insert(Idx);
3547 }
3548 return getGatherCost(VecTy, ShuffledElements);
3549}
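
A minimal standalone sketch of the duplicate detection used by getGatherCost above (plain C++ with illustrative values, not part of SLPVectorizer.cpp): the reverse walk keeps the highest lane of each repeated value as a real insert and marks earlier repeats as shuffle candidates.

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Stand-in for VL: lanes 0..3 hold the scalars "a", "b", "a", "c".
  std::vector<std::string> VL = {"a", "b", "a", "c"};
  std::set<std::string> UniqueElements;
  std::set<unsigned> ShuffledElements;
  // Same reverse walk as above: the highest lane of each repeated value keeps
  // its insertelement; earlier duplicates become shuffle candidates.
  for (unsigned I = VL.size(); I > 0; --I) {
    unsigned Idx = I - 1;
    if (!UniqueElements.insert(VL[Idx]).second)
      ShuffledElements.insert(Idx);
  }
  for (unsigned Idx : ShuffledElements)
    std::cout << "lane " << Idx << " is a shuffle candidate\n"; // prints lane 0
  return 0;
}

Fed into the two-argument getGatherCost overload above, this example would be charged three InsertElement costs (lanes 1, 2 and 3) plus one SK_PermuteSingleSrc shuffle, since ShuffledIndices is non-empty.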
3550
3551// Perform operand reordering on the instructions in VL and return the reordered
3552// operands in Left and Right.
3553void BoUpSLP::reorderInputsAccordingToOpcode(
3554 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
3555 SmallVectorImpl<Value *> &Right, const DataLayout &DL,
3556 ScalarEvolution &SE) {
3557 if (VL.empty())
3558 return;
3559 VLOperands Ops(VL, DL, SE);
3560 // Reorder the operands in place.
3561 Ops.reorder();
3562 Left = Ops.getVL(0);
3563 Right = Ops.getVL(1);
3564}
3565
3566void BoUpSLP::setInsertPointAfterBundle(TreeEntry *E) {
3567 // Get the basic block this bundle is in. All instructions in the bundle
3568 // should be in this block.
3569 auto *Front = E->getMainOp();
3570 auto *BB = Front->getParent();
3571 assert(llvm::all_of(make_range(E->Scalars.begin(), E->Scalars.end()),
3572                     [=](Value *V) -> bool {
3573                       auto *I = cast<Instruction>(V);
3574                       return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
3575                     }));
3576
3577 // The last instruction in the bundle in program order.
3578 Instruction *LastInst = nullptr;
3579
3580 // Find the last instruction. The common case should be that BB has been
3581 // scheduled, and the last instruction is VL.back(). So we start with
3582 // VL.back() and iterate over schedule data until we reach the end of the
3583 // bundle. The end of the bundle is marked by null ScheduleData.
3584 if (BlocksSchedules.count(BB)) {
3585 auto *Bundle =
3586 BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
3587 if (Bundle && Bundle->isPartOfBundle())
3588 for (; Bundle; Bundle = Bundle->NextInBundle)
3589 if (Bundle->OpValue == Bundle->Inst)
3590 LastInst = Bundle->Inst;
3591 }
3592
3593 // LastInst can still be null at this point if there's either not an entry
3594 // for BB in BlocksSchedules or there's no ScheduleData available for
3595 // VL.back(). This can be the case if buildTree_rec aborts for various
3596 // reasons (e.g., the maximum recursion depth is reached, the maximum region
3597 // size is reached, etc.). ScheduleData is initialized in the scheduling
3598 // "dry-run".
3599 //
3600 // If this happens, we can still find the last instruction by brute force. We
3601 // iterate forwards from Front (inclusive) until we either see all
3602 // instructions in the bundle or reach the end of the block. If Front is the
3603 // last instruction in program order, LastInst will be set to Front, and we
3604 // will visit all the remaining instructions in the block.
3605 //
3606 // One of the reasons we exit early from buildTree_rec is to place an upper
3607 // bound on compile-time. Thus, taking an additional compile-time hit here is
3608 // not ideal. However, this should be exceedingly rare since it requires that
3609 // we both exit early from buildTree_rec and that the bundle be out-of-order
3610 // (causing us to iterate all the way to the end of the block).
3611 if (!LastInst) {
3612 SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
3613 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
3614 if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
3615 LastInst = &I;
3616 if (Bundle.empty())
3617 break;
3618 }
3619 }
3620 assert(LastInst && "Failed to find last instruction in bundle");
3621
3622 // Set the insertion point after the last instruction in the bundle. Set the
3623 // debug location to Front.
3624 Builder.SetInsertPoint(BB, ++LastInst->getIterator());
3625 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
3626}
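
A minimal standalone sketch of the brute-force fallback above (plain C++ with illustrative names; the isOpcodeOrAlt filter is omitted): walk forward from the front of the block, remember the most recent bundle member, and stop once every member has been seen.

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Block contents in program order; the "bundle" is {b, d}.
  std::vector<std::string> Block = {"a", "b", "c", "d", "e"};
  std::set<std::string> Bundle = {"b", "d"};
  std::string LastInst;
  // Same fallback walk as above: scan forward, remember the most recent
  // bundle member seen, and stop once all members have been found.
  for (const std::string &I : Block) {
    if (Bundle.erase(I))
      LastInst = I;
    if (Bundle.empty())
      break;
  }
  std::cout << "last instruction in bundle: " << LastInst << '\n'; // prints d
  return 0;
}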
3627
3628Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
3629 Value *Vec = UndefValue::get(Ty);
3630 // Generate the 'InsertElement' instruction.
3631 for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
3632 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
3633 if (auto *Insrt = dyn_cast<InsertElementInst>(Vec)) {
3634 GatherSeq.insert(Insrt);
3635 CSEBlocks.insert(Insrt->getParent());
3636
3637 // Add to our 'need-to-extract' list.
3638 if (TreeEntry *E = getTreeEntry(VL[i])) {
3639 // Find which lane we need to extract.
3640 int FoundLane = -1;
3641 for (unsigned Lane = 0, LE = E->Scalars.size(); Lane != LE; ++Lane) {
3642 // Is this the lane of the scalar that we are looking for?
3643 if (E->Scalars[Lane] == VL[i]) {
3644 FoundLane = Lane;
3645 break;
3646 }
3647 }
3648 assert(FoundLane >= 0 && "Could not find the correct lane");
3649 if (!E->ReuseShuffleIndices.empty()) {
3650 FoundLane =
3651 std::distance(E->ReuseShuffleIndices.begin(),
3652 llvm::find(E->ReuseShuffleIndices, FoundLane));
3653 }
3654 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
3655 }
3656 }
3657 }
3658
3659 return Vec;
3660}
3661
3662Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
3663 InstructionsState S = getSameOpcode(VL);
3664 if (S.getOpcode()) {
3665 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
3666 if (E->isSame(VL)) {
3667 Value *V = vectorizeTree(E);
3668 if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) {
3669 // We need to get the vectorized value but without shuffle.
3670 if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
3671 V = SV->getOperand(0);
3672 } else {
3673 // Reshuffle to get only unique values.
3674 SmallVector<unsigned, 4> UniqueIdxs;
3675 SmallSet<unsigned, 4> UsedIdxs;
3676 for(unsigned Idx : E->ReuseShuffleIndices)
3677 if (UsedIdxs.insert(Idx).second)
3678 UniqueIdxs.emplace_back(Idx);
3679 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
3680 UniqueIdxs);
3681 }
3682 }
3683 return V;
3684 }
3685 }
3686 }
3687
3688 Type *ScalarTy = S.OpValue->getType();
3689 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
3690 ScalarTy = SI->getValueOperand()->getType();
3691
3692 // Check that every instruction appears once in this bundle.
3693 SmallVector<unsigned, 4> ReuseShuffleIndicies;
3694 SmallVector<Value *, 4> UniqueValues;
3695 if (VL.size() > 2) {
3696 DenseMap<Value *, unsigned> UniquePositions;
3697 for (Value *V : VL) {
3698 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
3699 ReuseShuffleIndicies.emplace_back(Res.first->second);
3700 if (Res.second || isa<Constant>(V))
3701 UniqueValues.emplace_back(V);
3702 }
3703 // Do not shuffle a single element, or if the number of unique values is not
3704 // a power of 2.
3705 if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 ||
3706 !llvm::isPowerOf2_32(UniqueValues.size()))
3707 ReuseShuffleIndicies.clear();
3708 else
3709 VL = UniqueValues;
3710 }
3711 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
3712
3713 Value *V = Gather(VL, VecTy);
3714 if (!ReuseShuffleIndicies.empty()) {
3715 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3716 ReuseShuffleIndicies, "shuffle");
3717 if (auto *I = dyn_cast<Instruction>(V)) {
3718 GatherSeq.insert(I);
3719 CSEBlocks.insert(I->getParent());
3720 }
3721 }
3722 return V;
3723}
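
A minimal standalone sketch of the reuse-shuffle bookkeeping in the gather path above (plain C++ with illustrative string values; the Constant special case and the power-of-two check are omitted): each duplicate maps back to the position of its first occurrence, so the deduplicated vector can be re-expanded with a single shuffle.

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Bundle with repeated scalars, as handled in the gather path above.
  std::vector<std::string> VL = {"a", "b", "a", "b"};
  std::map<std::string, unsigned> UniquePositions;
  std::vector<unsigned> ReuseShuffleIndicies;
  std::vector<std::string> UniqueValues;
  for (const std::string &V : VL) {
    auto Res = UniquePositions.emplace(V, UniqueValues.size());
    ReuseShuffleIndicies.push_back(Res.first->second);
    if (Res.second)
      UniqueValues.push_back(V);
  }
  // UniqueValues = {a, b}; the gather builds a 2-wide vector and the mask
  // {0, 1, 0, 1} re-expands it to the original 4 lanes.
  for (unsigned Idx : ReuseShuffleIndicies)
    std::cout << Idx << ' '; // prints: 0 1 0 1
  std::cout << '\n';
  return 0;
}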
3724
3725static void inversePermutation(ArrayRef<unsigned> Indices,
3726 SmallVectorImpl<unsigned> &Mask) {
3727 Mask.clear();
3728 const unsigned E = Indices.size();
3729 Mask.resize(E);
3730 for (unsigned I = 0; I < E; ++I)
3731 Mask[Indices[I]] = I;
3732}
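
A self-contained sketch of the inversePermutation helper above, using std::vector in place of the LLVM containers (illustrative only): because Mask[Indices[I]] = I, the order {2, 0, 1} inverts to the mask {1, 2, 0}.

#include <iostream>
#include <vector>

// Same logic as the static inversePermutation above, on std::vector.
static void invertPermutation(const std::vector<unsigned> &Indices,
                              std::vector<unsigned> &Mask) {
  Mask.assign(Indices.size(), 0);
  for (unsigned I = 0; I < Indices.size(); ++I)
    Mask[Indices[I]] = I;
}

int main() {
  std::vector<unsigned> Mask;
  invertPermutation({2, 0, 1}, Mask);
  for (unsigned M : Mask)
    std::cout << M << ' '; // prints: 1 2 0
  std::cout << '\n';
  return 0;
}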
3733
3734Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
3735 IRBuilder<>::InsertPointGuard Guard(Builder);
3736
3737 if (E->VectorizedValue) {
3738 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
3739 return E->VectorizedValue;
3740 }
3741
3742 Instruction *VL0 = E->getMainOp();
3743 Type *ScalarTy = VL0->getType();
3744 if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
3745 ScalarTy = SI->getValueOperand()->getType();
3746 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
3747
3748 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
3749
3750 if (E->NeedToGather) {
3751 setInsertPointAfterBundle(E);
3752 auto *V = Gather(E->Scalars, VecTy);
3753 if (NeedToShuffleReuses) {
3754 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3755 E->ReuseShuffleIndices, "shuffle");
3756 if (auto *I = dyn_cast<Instruction>(V)) {
3757 GatherSeq.insert(I);
3758 CSEBlocks.insert(I->getParent());
3759 }
3760 }
3761 E->VectorizedValue = V;
3762 return V;
3763 }
3764
3765 unsigned ShuffleOrOp =
3766 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
3767 switch (ShuffleOrOp) {
3768 case Instruction::PHI: {
3769 auto *PH = cast<PHINode>(VL0);
3770 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
3771 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
3772 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
3773 Value *V = NewPhi;
3774 if (NeedToShuffleReuses) {
3775 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3776 E->ReuseShuffleIndices, "shuffle");
3777 }
3778 E->VectorizedValue = V;
3779
3780 // PHINodes may have multiple entries from the same block. We want to
3781 // visit every block once.
3782 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
3783
3784 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
3785 ValueList Operands;
3786 BasicBlock *IBB = PH->getIncomingBlock(i);
3787
3788 if (!VisitedBBs.insert(IBB).second) {
3789 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
3790 continue;
3791 }
3792
3793 Builder.SetInsertPoint(IBB->getTerminator());
3794 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
3795 Value *Vec = vectorizeTree(E->getOperand(i));
3796 NewPhi->addIncoming(Vec, IBB);
3797 }
3798
3799 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
3800        "Invalid number of incoming values");
3801 return V;
3802 }
3803
3804 case Instruction::ExtractElement: {
3805 if (!E->NeedToGather) {
3806 Value *V = E->getSingleOperand(0);
3807 if (!E->ReorderIndices.empty()) {
3808 OrdersType Mask;
3809 inversePermutation(E->ReorderIndices, Mask);
3810 Builder.SetInsertPoint(VL0);
3811 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), Mask,
3812 "reorder_shuffle");
3813 }
3814 if (NeedToShuffleReuses) {
3815 // TODO: Merge this shuffle with the ReorderShuffleMask.
3816 if (E->ReorderIndices.empty())
3817 Builder.SetInsertPoint(VL0);
3818 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3819 E->ReuseShuffleIndices, "shuffle");
3820 }
3821 E->VectorizedValue = V;
3822 return V;
3823 }
3824 setInsertPointAfterBundle(E);
3825 auto *V = Gather(E->Scalars, VecTy);
3826 if (NeedToShuffleReuses) {
3827 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3828 E->ReuseShuffleIndices, "shuffle");
3829 if (auto *I = dyn_cast<Instruction>(V)) {
3830 GatherSeq.insert(I);
3831 CSEBlocks.insert(I->getParent());
3832 }
3833 }
3834 E->VectorizedValue = V;
3835 return V;
3836 }
3837 case Instruction::ExtractValue: {
3838 if (!E->NeedToGather) {
3839 LoadInst *LI = cast<LoadInst>(E->getSingleOperand(0));
3840 Builder.SetInsertPoint(LI);
3841 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
3842 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
3843 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
3844 Value *NewV = propagateMetadata(V, E->Scalars);
3845 if (!E->ReorderIndices.empty()) {
3846 OrdersType Mask;
3847 inversePermutation(E->ReorderIndices, Mask);
3848 NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), Mask,
3849 "reorder_shuffle");
3850 }
3851 if (NeedToShuffleReuses) {
3852 // TODO: Merge this shuffle with the ReorderShuffleMask.
3853 NewV = Builder.CreateShuffleVector(
3854 NewV, UndefValue::get(VecTy), E->ReuseShuffleIndices, "shuffle");
3855 }
3856 E->VectorizedValue = NewV;
3857 return NewV;
3858 }
3859 setInsertPointAfterBundle(E);
3860 auto *V = Gather(E->Scalars, VecTy);
3861 if (NeedToShuffleReuses) {
3862 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3863 E->ReuseShuffleIndices, "shuffle");
3864 if (auto *I = dyn_cast<Instruction>(V)) {
3865 GatherSeq.insert(I);
3866 CSEBlocks.insert(I->getParent());
3867 }
3868 }
3869 E->VectorizedValue = V;
3870 return V;
3871 }
3872 case Instruction::ZExt:
3873 case Instruction::SExt:
3874 case Instruction::FPToUI:
3875 case Instruction::FPToSI:
3876 case Instruction::FPExt:
3877 case Instruction::PtrToInt:
3878 case Instruction::IntToPtr:
3879 case Instruction::SIToFP:
3880 case Instruction::UIToFP:
3881 case Instruction::Trunc:
3882 case Instruction::FPTrunc:
3883 case Instruction::BitCast: {
3884 setInsertPointAfterBundle(E);
3885
3886 Value *InVec = vectorizeTree(E->getOperand(0));
3887
3888 if (E->VectorizedValue) {
3889 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
3890 return E->VectorizedValue;
3891 }
3892
3893 auto *CI = cast<CastInst>(VL0);
3894 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
3895 if (NeedToShuffleReuses) {
3896 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3897 E->ReuseShuffleIndices, "shuffle");
3898 }
3899 E->VectorizedValue = V;
3900 ++NumVectorInstructions;
3901 return V;
3902 }
3903 case Instruction::FCmp:
3904 case Instruction::ICmp: {
3905 setInsertPointAfterBundle(E);
3906
3907 Value *L = vectorizeTree(E->getOperand(0));
3908 Value *R = vectorizeTree(E->getOperand(1));
3909
3910 if (E->VectorizedValue) {
3911 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
3912 return E->VectorizedValue;
3913 }
3914
3915 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
3916 Value *V;
3917 if (E->getOpcode() == Instruction::FCmp)
3918 V = Builder.CreateFCmp(P0, L, R);
3919 else
3920 V = Builder.CreateICmp(P0, L, R);
3921
3922 propagateIRFlags(V, E->Scalars, VL0);
3923 if (NeedToShuffleReuses) {
3924 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3925 E->ReuseShuffleIndices, "shuffle");
3926 }
3927 E->VectorizedValue = V;
3928 ++NumVectorInstructions;
3929 return V;
3930 }
3931 case Instruction::Select: {
3932 setInsertPointAfterBundle(E);
3933
3934 Value *Cond = vectorizeTree(E->getOperand(0));
3935 Value *True = vectorizeTree(E->getOperand(1));
3936 Value *False = vectorizeTree(E->getOperand(2));
3937
3938 if (E->VectorizedValue) {
3939 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
3940 return E->VectorizedValue;
3941 }
3942
3943 Value *V = Builder.CreateSelect(Cond, True, False);
3944 if (NeedToShuffleReuses) {
3945 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3946 E->ReuseShuffleIndices, "shuffle");
3947 }
3948 E->VectorizedValue = V;
3949 ++NumVectorInstructions;
3950 return V;
3951 }
3952 case Instruction::FNeg: {
3953 setInsertPointAfterBundle(E);
3954
3955 Value *Op = vectorizeTree(E->getOperand(0));
3956
3957 if (E->VectorizedValue) {
3958 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
3959 return E->VectorizedValue;
3960 }
3961
3962 Value *V = Builder.CreateUnOp(
3963 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
3964 propagateIRFlags(V, E->Scalars, VL0);
3965 if (auto *I = dyn_cast<Instruction>(V))
3966 V = propagateMetadata(I, E->Scalars);
3967
3968 if (NeedToShuffleReuses) {
3969 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
3970 E->ReuseShuffleIndices, "shuffle");
3971 }
3972 E->VectorizedValue = V;
3973 ++NumVectorInstructions;
3974
3975 return V;
3976 }
3977 case Instruction::Add:
3978 case Instruction::FAdd:
3979 case Instruction::Sub:
3980 case Instruction::FSub:
3981 case Instruction::Mul:
3982 case Instruction::FMul:
3983 case Instruction::UDiv:
3984 case Instruction::SDiv:
3985 case Instruction::FDiv:
3986 case Instruction::URem:
3987 case Instruction::SRem:
3988 case Instruction::FRem:
3989 case Instruction::Shl:
3990 case Instruction::LShr:
3991 case Instruction::AShr:
3992 case Instruction::And:
3993 case Instruction::Or:
3994 case Instruction::Xor: {
3995 setInsertPointAfterBundle(E);
3996
3997 Value *LHS = vectorizeTree(E->getOperand(0));
3998 Value *RHS = vectorizeTree(E->getOperand(1));
3999
4000 if (E->VectorizedValue) {
4001 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4002 return E->VectorizedValue;
4003 }
4004
4005 Value *V = Builder.CreateBinOp(
4006 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
4007 RHS);
4008 propagateIRFlags(V, E->Scalars, VL0);
4009 if (auto *I = dyn_cast<Instruction>(V))
4010 V = propagateMetadata(I, E->Scalars);
4011
4012 if (NeedToShuffleReuses) {
4013 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
4014 E->ReuseShuffleIndices, "shuffle");
4015 }
4016 E->VectorizedValue = V;
4017 ++NumVectorInstructions;
4018
4019 return V;
4020 }
4021 case Instruction::Load: {
4022 // Loads are inserted at the head of the tree because we don't want to
4023 // sink them all the way down past store instructions.
4024 bool IsReorder = E->updateStateIfReorder();
4025 if (IsReorder)
4026 VL0 = E->getMainOp();
4027 setInsertPointAfterBundle(E);
4028
4029 LoadInst *LI = cast<LoadInst>(VL0);
4030 Type *ScalarLoadTy = LI->getType();
4031 unsigned AS = LI->getPointerAddressSpace();
4032
4033 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
4034 VecTy->getPointerTo(AS));
4035
4036 // The pointer operand uses an in-tree scalar, so we add the new BitCast to the
4037 // ExternalUses list to make sure that an extract will be generated in the
4038 // future.
4039 Value *PO = LI->getPointerOperand();
4040 if (getTreeEntry(PO))
4041 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));
4042
4043 MaybeAlign Alignment = MaybeAlign(LI->getAlignment());
4044 LI = Builder.CreateLoad(VecTy, VecPtr);
4045 if (!Alignment)
4046 Alignment = MaybeAlign(DL->getABITypeAlignment(ScalarLoadTy));
4047 LI->setAlignment(Alignment);
4048 Value *V = propagateMetadata(LI, E->Scalars);
4049 if (IsReorder) {
4050 OrdersType Mask;
4051 inversePermutation(E->ReorderIndices, Mask);
4052 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
4053 Mask, "reorder_shuffle");
4054 }
4055 if (NeedToShuffleReuses) {
4056 // TODO: Merge this shuffle with the ReorderShuffleMask.
4057 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
4058 E->ReuseShuffleIndices, "shuffle");
4059 }
4060 E->VectorizedValue = V;
4061 ++NumVectorInstructions;
4062 return V;
4063 }
4064 case Instruction::Store: {
4065 StoreInst *SI = cast<StoreInst>(VL0);
4066 unsigned Alignment = SI->getAlignment();
4067 unsigned AS = SI->getPointerAddressSpace();
4068
4069 setInsertPointAfterBundle(E);
4070
4071 Value *VecValue = vectorizeTree(E->getOperand(0));
4072 Value *ScalarPtr = SI->getPointerOperand();
4073 Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
4074 StoreInst *ST = Builder.CreateStore(VecValue, VecPtr);
4075
4076 // The pointer operand uses an in-tree scalar, so add the new BitCast to
4077 // ExternalUses to make sure that an extract will be generated in the
4078 // future.
4079 if (getTreeEntry(ScalarPtr))
4080 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));
4081
4082 if (!Alignment)
4083 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
4084
4085 ST->setAlignment(Align(Alignment));
4086 Value *V = propagateMetadata(ST, E->Scalars);
4087 if (NeedToShuffleReuses) {
4088 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
4089 E->ReuseShuffleIndices, "shuffle");
4090 }
4091 E->VectorizedValue = V;
4092 ++NumVectorInstructions;
4093 return V;
4094 }
4095 case Instruction::GetElementPtr: {
4096 setInsertPointAfterBundle(E);
4097
4098 Value *Op0 = vectorizeTree(E->getOperand(0));
4099
4100 std::vector<Value *> OpVecs;
4101 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
4102 ++j) {
4103 ValueList &VL = E->getOperand(j);
4104 // Need to cast all elements to the same type before vectorization to
4105 // avoid a crash.
4106 Type *VL0Ty = VL0->getOperand(j)->getType();
4107 Type *Ty = llvm::all_of(
4108 VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
4109 ? VL0Ty
4110 : DL->getIndexType(cast<GetElementPtrInst>(VL0)
4111 ->getPointerOperandType()
4112 ->getScalarType());
4113 for (Value *&V : VL) {
4114 auto *CI = cast<ConstantInt>(V);
4115 V = ConstantExpr::getIntegerCast(CI, Ty,
4116 CI->getValue().isSignBitSet());
4117 }
4118 Value *OpVec = vectorizeTree(VL);
4119 OpVecs.push_back(OpVec);
4120 }
4121
4122 Value *V = Builder.CreateGEP(
4123 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
4124 if (Instruction *I = dyn_cast<Instruction>(V))
4125 V = propagateMetadata(I, E->Scalars);
4126
4127 if (NeedToShuffleReuses) {
4128 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
4129 E->ReuseShuffleIndices, "shuffle");
4130 }
4131 E->VectorizedValue = V;
4132 ++NumVectorInstructions;
4133
4134 return V;
4135 }
4136 case Instruction::Call: {
4137 CallInst *CI = cast<CallInst>(VL0);
4138 setInsertPointAfterBundle(E);
4139
4140 Intrinsic::ID IID = Intrinsic::not_intrinsic;
4141 if (Function *FI = CI->getCalledFunction())
4142 IID = FI->getIntrinsicID();
4143
4144 Value *ScalarArg = nullptr;
4145 std::vector<Value *> OpVecs;
4146 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
4147 ValueList OpVL;
4148 // Some intrinsics have scalar arguments. Such arguments should not be
4149 // vectorized.
4150 if (hasVectorInstrinsicScalarOpd(IID, j)) {
4151 CallInst *CEI = cast<CallInst>(VL0);
4152 ScalarArg = CEI->getArgOperand(j);
4153 OpVecs.push_back(CEI->getArgOperand(j));
4154 continue;
4155 }
4156
4157 Value *OpVec = vectorizeTree(E->getOperand(j));
4158 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
4159 OpVecs.push_back(OpVec);
4160 }
4161
4162 Module *M = F->getParent();
4163 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4164 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
4165 Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
4166 SmallVector<OperandBundleDef, 1> OpBundles;
4167 CI->getOperandBundlesAsDefs(OpBundles);
4168 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
4169
4170 // The scalar argument uses an in-tree scalar so we add the new vectorized
4171 // call to ExternalUses list to make sure that an extract will be
4172 // generated in the future.
4173 if (ScalarArg && getTreeEntry(ScalarArg))
4174 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
4175
4176 propagateIRFlags(V, E->Scalars, VL0);
4177 if (NeedToShuffleReuses) {
4178 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
4179 E->ReuseShuffleIndices, "shuffle");
4180 }
4181 E->VectorizedValue = V;
4182 ++NumVectorInstructions;
4183 return V;
4184 }
4185 case Instruction::ShuffleVector: {
4186 assert(E->isAltShuffle() &&
4187        ((Instruction::isBinaryOp(E->getOpcode()) &&
4188          Instruction::isBinaryOp(E->getAltOpcode())) ||
4189         (Instruction::isCast(E->getOpcode()) &&
4190          Instruction::isCast(E->getAltOpcode()))) &&
4191        "Invalid Shuffle Vector Operand");
4192
4193 Value *LHS = nullptr, *RHS = nullptr;
4194 if (Instruction::isBinaryOp(E->getOpcode())) {
4195 setInsertPointAfterBundle(E);
4196 LHS = vectorizeTree(E->getOperand(0));
4197 RHS = vectorizeTree(E->getOperand(1));
4198 } else {
4199 setInsertPointAfterBundle(E);
4200 LHS = vectorizeTree(E->getOperand(0));
4201 }
4202
4203 if (E->VectorizedValue) {
4204 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4205 return E->VectorizedValue;
4206 }
4207
4208 Value *V0, *V1;
4209 if (Instruction::isBinaryOp(E->getOpcode())) {
4210 V0 = Builder.CreateBinOp(
4211 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
4212 V1 = Builder.CreateBinOp(
4213 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
4214 } else {
4215 V0 = Builder.CreateCast(
4216 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
4217 V1 = Builder.CreateCast(
4218 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
4219 }
4220
4221 // Create shuffle to take alternate operations from the vector.
4222 // Also, gather up main and alt scalar ops to propagate IR flags to
4223 // each vector operation.
4224 ValueList OpScalars, AltScalars;
4225 unsigned e = E->Scalars.size();
4226 SmallVector<Constant *, 8> Mask(e);
4227 for (unsigned i = 0; i < e; ++i) {
4228 auto *OpInst = cast<Instruction>(E->Scalars[i]);
4229 assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
4230 if (OpInst->getOpcode() == E->getAltOpcode()) {
4231 Mask[i] = Builder.getInt32(e + i);
4232 AltScalars.push_back(E->Scalars[i]);
4233 } else {
4234 Mask[i] = Builder.getInt32(i);
4235 OpScalars.push_back(E->Scalars[i]);
4236 }
4237 }
4238
4239 Value *ShuffleMask = ConstantVector::get(Mask);
4240 propagateIRFlags(V0, OpScalars);
4241 propagateIRFlags(V1, AltScalars);
4242
4243 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
4244 if (Instruction *I = dyn_cast<Instruction>(V))
4245 V = propagateMetadata(I, E->Scalars);
4246 if (NeedToShuffleReuses) {
4247 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
4248 E->ReuseShuffleIndices, "shuffle");
4249 }
4250 E->VectorizedValue = V;
4251 ++NumVectorInstructions;
4252
4253 return V;
4254 }
4255 default:
4256 llvm_unreachable("unknown inst");
4257 }
4258 return nullptr;
4259}
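
As a standalone illustration of the alternate-shuffle mask built in the Instruction::ShuffleVector case above (plain C++, no LLVM types; the opcode strings are purely illustrative): lane i selects element i of V0 for the main opcode and element e + i of V1 for the alternate opcode.

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Bundle of alternating main/alt opcodes, as in the ShuffleVector case above.
  std::vector<std::string> Scalars = {"add", "sub", "add", "sub"};
  const std::string AltOpcode = "sub";
  unsigned e = Scalars.size();
  std::vector<unsigned> Mask(e);
  // Lane i takes element i of the "main" vector (V0) or element e + i of the
  // "alternate" vector (V1), mirroring the mask built for the final shuffle.
  for (unsigned i = 0; i < e; ++i)
    Mask[i] = (Scalars[i] == AltOpcode) ? e + i : i;
  for (unsigned M : Mask)
    std::cout << M << ' '; // prints: 0 5 2 7
  std::cout << '\n';
  return 0;
}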
4260
4261Value *BoUpSLP::vectorizeTree() {
4262 ExtraValueToDebugLocsMap ExternallyUsedValues;
4263 return vectorizeTree(ExternallyUsedValues);
4264}
4265
4266Value *
4267BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
4268 // All blocks must be scheduled before any instructions are inserted.
4269 for (auto &BSIter : BlocksSchedules) {
4270 scheduleBlock(BSIter.second.get());
4271 }
4272
4273 Builder.SetInsertPoint(&F->getEntryBlock().front());
4274 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
4275
4276 // If the vectorized tree can be rewritten in a smaller type, we truncate the
4277 // vectorized root. InstCombine will then rewrite the entire expression. We
4278 // sign extend the extracted values below.
4279 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4280 if (MinBWs.count(ScalarRoot)) {
4281 if (auto *I = dyn_cast<Instruction>(VectorRoot))
4282 Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
4283 auto BundleWidth = VectorizableTree[0]->Scalars.size();
4284 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4285 auto *VecTy = VectorType::get(MinTy, BundleWidth);
4286 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
4287 VectorizableTree[0]->VectorizedValue = Trunc;
4288 }
4289
4290 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
4291            << " values .\n");
4292
4293 // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
4294 // specified by ScalarType.
4295 auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
4296 if (!MinBWs.count(ScalarRoot))
4297 return Ex;
4298 if (MinBWs[ScalarRoot].second)
4299 return Builder.CreateSExt(Ex, ScalarType);
4300 return Builder.CreateZExt(Ex, ScalarType);
4301 };
4302
4303 // Extract all of the elements with the external uses.
4304 for (const auto &ExternalUse : ExternalUses) {
4305 Value *Scalar = ExternalUse.Scalar;
4306 llvm::User *User = ExternalUse.User;
4307
4308 // Skip users that we have already RAUWed. This happens when one instruction
4309 // has multiple uses of the same value.
4310 if (User && !is_contained(Scalar->users(), User))
4311 continue;
4312 TreeEntry *E = getTreeEntry(Scalar);
4313 assert(E && "Invalid scalar");
4314 assert(!E->NeedToGather && "Extracting from a gather list");
4315
4316 Value *Vec = E->VectorizedValue;
4317 assert(Vec && "Can't find vectorizable value");
4318
4319 Value *Lane = Builder.getInt32(ExternalUse.Lane);
4320 // If User == nullptr, the Scalar is used as extra arg. Generate
4321 // ExtractElement instruction and update the record for this scalar in
4322 // ExternallyUsedValues.
4323 if (!User) {
4324 assert(ExternallyUsedValues.count(Scalar) &&
4325        "Scalar with nullptr as an external user must be registered in "
4326        "ExternallyUsedValues map");
4327 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
4328 Builder.SetInsertPoint(VecI->getParent(),
4329 std::next(VecI->getIterator()));
4330 } else {
4331 Builder.SetInsertPoint(&F->getEntryBlock().front());
4332 }
4333 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4334 Ex = extend(ScalarRoot, Ex, Scalar->getType());
4335 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
4336 auto &Locs = ExternallyUsedValues[Scalar];
4337 ExternallyUsedValues.insert({Ex, Locs});
4338 ExternallyUsedValues.erase(Scalar);
4339 // Required to update internally referenced instructions.
4340 Scalar->replaceAllUsesWith(Ex);
4341 continue;
4342 }
4343
4344 // Generate extracts for out-of-tree users.
4345 // Find the insertion point for the extractelement lane.
4346 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
4347 if (PHINode *PH = dyn_cast<PHINode>(User)) {
4348 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
4349 if (PH->getIncomingValue(i) == Scalar) {
4350 Instruction *IncomingTerminator =
4351 PH->getIncomingBlock(i)->getTerminator();
4352 if (isa<CatchSwitchInst>(IncomingTerminator)) {
4353 Builder.SetInsertPoint(VecI->getParent(),
4354 std::next(VecI->getIterator()));
4355 } else {
4356 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
4357 }
4358 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4359 Ex = extend(ScalarRoot, Ex, Scalar->getType());
4360 CSEBlocks.insert(PH->getIncomingBlock(i));
4361 PH->setOperand(i, Ex);
4362 }
4363 }
4364 } else {
4365 Builder.SetInsertPoint(cast<Instruction>(User));
4366 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4367 Ex = extend(ScalarRoot, Ex, Scalar->getType());
4368 CSEBlocks.insert(cast<Instruction>(User)->getParent());
4369 User->replaceUsesOfWith(Scalar, Ex);
4370 }
4371 } else {
4372 Builder.SetInsertPoint(&F->getEntryBlock().front());
4373 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4374 Ex = extend(ScalarRoot, Ex, Scalar->getType());
4375 CSEBlocks.insert(&F->getEntryBlock());
4376 User->replaceUsesOfWith(Scalar, Ex);
4377 }
4378
4379 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
4380 }
4381
4382 // For each vectorized value:
4383 for (auto &TEPtr : VectorizableTree) {
4384 TreeEntry *Entry = TEPtr.get();
4385
4386 // No need to handle users of gathered values.
4387 if (Entry->NeedToGather)
4388 continue;
4389
4390 assert(Entry->VectorizedValue && "Can't find vectorizable value");
4391
4392 // For each lane:
4393 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
4394 Value *Scalar = Entry->Scalars[Lane];
4395
4396#ifndef NDEBUG
4397 Type *Ty = Scalar->getType();
4398 if (!Ty->isVoidTy()) {
4399 for (User *U : Scalar->users()) {
4400 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
4401
4402 // It is legal to delete users in the ignorelist.
4403 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
4404        "Deleting out-of-tree value");
4405 }
4406 }
4407#endif
4408 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
4409 eraseInstruction(cast<Instruction>(Scalar));
4410 }
4411 }
4412
4413 Builder.ClearInsertionPoint();
4414
4415 return VectorizableTree[0]->VectorizedValue;
4416}
4417
4418void BoUpSLP::optimizeGatherSequence() {
4419 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
1 Assuming 'DebugFlag' is false
2 Loop condition is false. Exiting loop
4420            << " gather sequences instructions.\n");
4421 // LICM InsertElementInst sequences.
4422 for (Instruction *I : GatherSeq) {
4423 if (isDeleted(I))
4424 continue;
4425
4426 // Check if this block is inside a loop.
4427 Loop *L = LI->getLoopFor(I->getParent());
4428 if (!L)
4429 continue;
4430
4431 // Check if it has a preheader.
4432 BasicBlock *PreHeader = L->getLoopPreheader();
4433 if (!PreHeader)
4434 continue;
4435
4436 // If the vector or the element that we insert into it are
4437 // instructions that are defined in this basic block then we can't
4438 // hoist this instruction.
4439 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
4440 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
4441 if (Op0 && L->contains(Op0))
4442 continue;
4443 if (Op1 && L->contains(Op1))
4444 continue;
4445
4446 // We can hoist this instruction. Move it to the pre-header.
4447 I->moveBefore(PreHeader->getTerminator());
4448 }
4449
4450 // Make a list of all reachable blocks in our CSE queue.
4451 SmallVector<const DomTreeNode *, 8> CSEWorkList;
4452 CSEWorkList.reserve(CSEBlocks.size());
4453 for (BasicBlock *BB : CSEBlocks)
4454 if (DomTreeNode *N = DT->getNode(BB)) {
4455 assert(DT->isReachableFromEntry(N));
4456 CSEWorkList.push_back(N);
4457 }
4458
4459 // Sort blocks by domination. This ensures we visit a block after all blocks
4460 // dominating it are visited.
4461 llvm::stable_sort(CSEWorkList,
4462 [this](const DomTreeNode *A, const DomTreeNode *B) {
4463 return DT->properlyDominates(A, B);
4464 });
4465
4466 // Perform O(N^2) search over the gather sequences and merge identical
4467 // instructions. TODO: We can further optimize this scan if we split the
4468 // instructions into different buckets based on the insert lane.
4469 SmallVector<Instruction *, 16> Visited;
4470 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
3 Assuming 'I' is not equal to 'E'
4 Loop condition is true. Entering loop body
4471 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
5 Assuming the condition is false
6 Calling 'DominatorTreeBase::dominates'
13 Returning from 'DominatorTreeBase::dominates'
14 '?' condition is true
4472        "Worklist not sorted properly!");
4473 BasicBlock *BB = (*I)->getBlock();
15 Called C++ object pointer is null
4474 // For all instructions in blocks containing gather sequences:
4475 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
4476 Instruction *In = &*it++;
4477 if (isDeleted(In))
4478 continue;
4479 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
4480 continue;
4481
4482 // Check if we can replace this instruction with any of the
4483 // visited instructions.
4484 for (Instruction *v : Visited) {
4485 if (In->isIdenticalTo(v) &&
4486 DT->dominates(v->getParent(), In->getParent())) {
4487 In->replaceAllUsesWith(v);
4488 eraseInstruction(In);
4489 In = nullptr;
4490 break;
4491 }
4492 }
4493 if (In) {
4494 assert(!is_contained(Visited, In));
4495 Visited.push_back(In);
4496 }
4497 }
4498 }
4499 CSEBlocks.clear();
4500 GatherSeq.clear();
4501}
4502
4503 // Groups the instructions into a bundle (which is then a single scheduling entity)
4504// and schedules instructions until the bundle gets ready.
4505Optional<BoUpSLP::ScheduleData *>
4506BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
4507 const InstructionsState &S) {
4508 if (isa<PHINode>(S.OpValue))
4509 return nullptr;
4510
4511 // Initialize the instruction bundle.
4512 Instruction *OldScheduleEnd = ScheduleEnd;
4513 ScheduleData *PrevInBundle = nullptr;
4514 ScheduleData *Bundle = nullptr;
4515 bool ReSchedule = false;
4516 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
4517
4518 // Make sure that the scheduling region contains all
4519 // instructions of the bundle.
4520 for (Value *V : VL) {
4521 if (!extendSchedulingRegion(V, S))
4522 return None;
4523 }
4524
4525 for (Value *V : VL) {
4526 ScheduleData *BundleMember = getScheduleData(V);
4527 assert(BundleMember &&
4528        "no ScheduleData for bundle member (maybe not in same basic block)");
4529 if (BundleMember->IsScheduled) {
4530 // A bundle member was scheduled as single instruction before and now
4531 // needs to be scheduled as part of the bundle. We just get rid of the
4532 // existing schedule.
4533 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
4534            << " was already scheduled\n");
4535 ReSchedule = true;
4536 }
4537 assert(BundleMember->isSchedulingEntity() &&
4538        "bundle member already part of other bundle");
4539 if (PrevInBundle) {
4540 PrevInBundle->NextInBundle = BundleMember;
4541 } else {
4542 Bundle = BundleMember;
4543 }
4544 BundleMember->UnscheduledDepsInBundle = 0;
4545 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
4546
4547 // Group the instructions to a bundle.
4548 BundleMember->FirstInBundle = Bundle;
4549 PrevInBundle = BundleMember;
4550 }
4551 if (ScheduleEnd != OldScheduleEnd) {
4552 // The scheduling region got new instructions at the lower end (or it is a
4553 // new region for the first bundle). This makes it necessary to
4554 // recalculate all dependencies.
4555 // It is seldom that this needs to be done a second time after adding the
4556 // initial bundle to the region.
4557 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
4558 doForAllOpcodes(I, [](ScheduleData *SD) {
4559 SD->clearDependencies();
4560 });
4561 }
4562 ReSchedule = true;
4563 }
4564 if (ReSchedule) {
4565 resetSchedule();
4566 initialFillReadyList(ReadyInsts);
4567 }
4568 assert(Bundle && "Failed to find schedule bundle");
4569
4570 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
4571            << BB->getName() << "\n");
4572
4573 calculateDependencies(Bundle, true, SLP);
4574
4575 // Now try to schedule the new bundle. As soon as the bundle is "ready" it
4576 // means that there are no cyclic dependencies and we can schedule it.
4577 // Note that it's important that we don't "schedule" the bundle yet (see
4578 // cancelScheduling).
4579 while (!Bundle->isReady() && !ReadyInsts.empty()) {
4580
4581 ScheduleData *pickedSD = ReadyInsts.back();
4582 ReadyInsts.pop_back();
4583
4584 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
4585 schedule(pickedSD, ReadyInsts);
4586 }
4587 }
4588 if (!Bundle->isReady()) {
4589 cancelScheduling(VL, S.OpValue);
4590 return None;
4591 }
4592 return Bundle;
4593}
4594
4595void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
4596 Value *OpValue) {
4597 if (isa<PHINode>(OpValue))
4598 return;
4599
4600 ScheduleData *Bundle = getScheduleData(OpValue);
4601 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
4602 assert(!Bundle->IsScheduled &&
4603        "Can't cancel bundle which is already scheduled");
4604 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
4605 "tried to unbundle something which is not a bundle");
4606
4607 // Un-bundle: make single instructions out of the bundle.
4608 ScheduleData *BundleMember = Bundle;
4609 while (BundleMember) {
4610 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
4611 BundleMember->FirstInBundle = BundleMember;
4612 ScheduleData *Next = BundleMember->NextInBundle;
4613 BundleMember->NextInBundle = nullptr;
4614 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
4615 if (BundleMember->UnscheduledDepsInBundle == 0) {
4616 ReadyInsts.insert(BundleMember);
4617 }
4618 BundleMember = Next;
4619 }
4620}
4621
4622BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
4623 // Allocate a new ScheduleData for the instruction.
4624 if (ChunkPos >= ChunkSize) {
4625 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
4626 ChunkPos = 0;
4627 }
4628 return &(ScheduleDataChunks.back()[ChunkPos++]);
4629}
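// A standalone illustrative sketch of the chunked allocation above, assuming
// a hypothetical Node type and pool class (not part of the LLVM API). Objects
// are handed out from fixed-size arrays, so pointers returned earlier remain
// valid when a new chunk is appended.
#include <memory>
#include <vector>

struct Node { int Payload = 0; };

class ChunkPool {
  static constexpr unsigned ChunkSize = 256;
  std::vector<std::unique_ptr<Node[]>> Chunks;
  unsigned Pos = ChunkSize; // forces allocation of the first chunk

public:
  Node *allocate() {
    if (Pos >= ChunkSize) {
      Chunks.push_back(std::make_unique<Node[]>(ChunkSize));
      Pos = 0;
    }
    return &Chunks.back()[Pos++];
  }
};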
4630
4631bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
4632 const InstructionsState &S) {
4633 if (getScheduleData(V, isOneOf(S, V)))
4634 return true;
4635 Instruction *I = dyn_cast<Instruction>(V);
4636 assert(I && "bundle member must be an instruction")((I && "bundle member must be an instruction") ? static_cast
<void> (0) : __assert_fail ("I && \"bundle member must be an instruction\""
, "/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp"
, 4636, __PRETTY_FUNCTION__))
;
4637 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
4638 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool {
4639 ScheduleData *ISD = getScheduleData(I);
4640 if (!ISD)
4641 return false;
4642 assert(isInSchedulingRegion(ISD) &&
4643 "ScheduleData not in scheduling region");
4644 ScheduleData *SD = allocateScheduleDataChunks();
4645 SD->Inst = I;
4646 SD->init(SchedulingRegionID, S.OpValue);
4647 ExtraScheduleDataMap[I][S.OpValue] = SD;
4648 return true;
4649 };
4650 if (CheckSheduleForI(I))
4651 return true;
4652 if (!ScheduleStart) {
4653 // It's the first instruction in the new region.
4654 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
4655 ScheduleStart = I;
4656 ScheduleEnd = I->getNextNode();
4657 if (isOneOf(S, I) != I)
4658 CheckSheduleForI(I);
4659 assert(ScheduleEnd && "tried to vectorize a terminator?");
4660 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
4661 return true;
4662 }
4663 // Search up and down at the same time, because we don't know if the new
4664 // instruction is above or below the existing scheduling region.
4665 BasicBlock::reverse_iterator UpIter =
4666 ++ScheduleStart->getIterator().getReverse();
4667 BasicBlock::reverse_iterator UpperEnd = BB->rend();
4668 BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
4669 BasicBlock::iterator LowerEnd = BB->end();
4670 while (true) {
4671 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
4672 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
4673 return false;
4674 }
4675
4676 if (UpIter != UpperEnd) {
4677 if (&*UpIter == I) {
4678 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
4679 ScheduleStart = I;
4680 if (isOneOf(S, I) != I)
4681 CheckSheduleForI(I);
4682 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
4683 << "\n");
4684 return true;
4685 }
4686 ++UpIter;
4687 }
4688 if (DownIter != LowerEnd) {
4689 if (&*DownIter == I) {
4690 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
4691 nullptr);
4692 ScheduleEnd = I->getNextNode();
4693 if (isOneOf(S, I) != I)
4694 CheckSheduleForI(I);
4695 assert(ScheduleEnd && "tried to vectorize a terminator?");
4696 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I
4697 << "\n");
4698 return true;
4699 }
4700 ++DownIter;
4701 }
4702 assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
4703 "instruction not found in block");
4704 }
4705 return true;
4706}
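// A standalone sketch of the bidirectional search above, using indices into a
// plain vector instead of BasicBlock iterators; findAroundWindow and Budget
// are hypothetical names. The new element is probed one step above and one
// step below the current [Lo, Hi) window per iteration, and the search gives
// up once the step budget (the region size limit) is spent.
#include <cstddef>
#include <vector>

bool findAroundWindow(const std::vector<int> &Block, std::size_t Lo,
                      std::size_t Hi, int Target, unsigned Budget) {
  std::size_t Up = Lo, Down = Hi;
  while (Budget--) {
    if (Up > 0 && Block[--Up] == Target)
      return true; // would become the new window start
    if (Down < Block.size() && Block[Down++] == Target)
      return true; // would extend the window end past Target
    if (Up == 0 && Down == Block.size())
      return false; // the whole block has been scanned
  }
  return false; // step budget exceeded, mirrors ScheduleRegionSizeLimit
}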
4707
4708void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
4709 Instruction *ToI,
4710 ScheduleData *PrevLoadStore,
4711 ScheduleData *NextLoadStore) {
4712 ScheduleData *CurrentLoadStore = PrevLoadStore;
4713 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
4714 ScheduleData *SD = ScheduleDataMap[I];
4715 if (!SD) {
4716 SD = allocateScheduleDataChunks();
4717 ScheduleDataMap[I] = SD;
4718 SD->Inst = I;
4719 }
4720 assert(!isInSchedulingRegion(SD) &&
4721 "new ScheduleData already in scheduling region");
4722 SD->init(SchedulingRegionID, I);
4723
4724 if (I->mayReadOrWriteMemory() &&
4725 (!isa<IntrinsicInst>(I) ||
4726 cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) {
4727 // Update the linked list of memory accessing instructions.
4728 if (CurrentLoadStore) {
4729 CurrentLoadStore->NextLoadStore = SD;
4730 } else {
4731 FirstLoadStoreInRegion = SD;
4732 }
4733 CurrentLoadStore = SD;
4734 }
4735 }
4736 if (NextLoadStore) {
4737 if (CurrentLoadStore)
4738 CurrentLoadStore->NextLoadStore = NextLoadStore;
4739 } else {
4740 LastLoadStoreInRegion = CurrentLoadStore;
4741 }
4742}
4743
4744void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
4745 bool InsertInReadyList,
4746 BoUpSLP *SLP) {
4747 assert(SD->isSchedulingEntity());
4748
4749 SmallVector<ScheduleData *, 10> WorkList;
4750 WorkList.push_back(SD);
4751
4752 while (!WorkList.empty()) {
4753 ScheduleData *SD = WorkList.back();
4754 WorkList.pop_back();
4755
4756 ScheduleData *BundleMember = SD;
4757 while (BundleMember) {
4758 assert(isInSchedulingRegion(BundleMember));
4759 if (!BundleMember->hasValidDependencies()) {
4760
4761 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
4762 << "\n");
4763 BundleMember->Dependencies = 0;
4764 BundleMember->resetUnscheduledDeps();
4765
4766 // Handle def-use chain dependencies.
4767 if (BundleMember->OpValue != BundleMember->Inst) {
4768 ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
4769 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
4770 BundleMember->Dependencies++;
4771 ScheduleData *DestBundle = UseSD->FirstInBundle;
4772 if (!DestBundle->IsScheduled)
4773 BundleMember->incrementUnscheduledDeps(1);
4774 if (!DestBundle->hasValidDependencies())
4775 WorkList.push_back(DestBundle);
4776 }
4777 } else {
4778 for (User *U : BundleMember->Inst->users()) {
4779 if (isa<Instruction>(U)) {
4780 ScheduleData *UseSD = getScheduleData(U);
4781 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
4782 BundleMember->Dependencies++;
4783 ScheduleData *DestBundle = UseSD->FirstInBundle;
4784 if (!DestBundle->IsScheduled)
4785 BundleMember->incrementUnscheduledDeps(1);
4786 if (!DestBundle->hasValidDependencies())
4787 WorkList.push_back(DestBundle);
4788 }
4789 } else {
4790 // I'm not sure if this can ever happen. But we need to be safe.
4791 // This lets the instruction/bundle never be scheduled and
4792 // eventually disable vectorization.
4793 BundleMember->Dependencies++;
4794 BundleMember->incrementUnscheduledDeps(1);
4795 }
4796 }
4797 }
4798
4799 // Handle the memory dependencies.
4800 ScheduleData *DepDest = BundleMember->NextLoadStore;
4801 if (DepDest) {
4802 Instruction *SrcInst = BundleMember->Inst;
4803 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
4804 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
4805 unsigned numAliased = 0;
4806 unsigned DistToSrc = 1;
4807
4808 while (DepDest) {
4809 assert(isInSchedulingRegion(DepDest));
4810
4811 // We have two limits to reduce the complexity:
4812 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
4813 // SLP->isAliased (which is the expensive part in this loop).
4814 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
4815 // the whole loop (even if the loop is fast, it's quadratic).
4816 // It's important for the loop break condition (see below) to
4817 // check this limit even between two read-only instructions.
4818 if (DistToSrc >= MaxMemDepDistance ||
4819 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
4820 (numAliased >= AliasedCheckLimit ||
4821 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
4822
4823 // We increment the counter only if the locations are aliased
4824 // (instead of counting all alias checks). This gives a better
4825 // balance between reduced runtime and accurate dependencies.
4826 numAliased++;
4827
4828 DepDest->MemoryDependencies.push_back(BundleMember);
4829 BundleMember->Dependencies++;
4830 ScheduleData *DestBundle = DepDest->FirstInBundle;
4831 if (!DestBundle->IsScheduled) {
4832 BundleMember->incrementUnscheduledDeps(1);
4833 }
4834 if (!DestBundle->hasValidDependencies()) {
4835 WorkList.push_back(DestBundle);
4836 }
4837 }
4838 DepDest = DepDest->NextLoadStore;
4839
4840 // Example, explaining the loop break condition: Let's assume our
4841 // starting instruction is i0 and MaxMemDepDistance = 3.
4842 //
4843 // +--------v--v--v
4844 // i0,i1,i2,i3,i4,i5,i6,i7,i8
4845 // +--------^--^--^
4846 //
4847 // MaxMemDepDistance lets us stop alias-checking at i3 and we add
4848 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
4849 // Previously we already added dependencies from i3 to i6,i7,i8
4850 // (because of MaxMemDepDistance). As we added a dependency from
4851 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
4852 // and we can abort this loop at i6.
4853 if (DistToSrc >= 2 * MaxMemDepDistance)
4854 break;
4855 DistToSrc++;
4856 }
4857 }
4858 }
4859 BundleMember = BundleMember->NextInBundle;
4860 }
4861 if (InsertInReadyList && SD->isReady()) {
4862 ReadyInsts.push_back(SD);
4863 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
4864 << "\n");
4865 }
4866 }
4867}
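// A simplified, standalone sketch of the memory-dependence scan above. The
// Access type and mayAlias() are toy stand-ins, not the real AliasAnalysis
// interface. At most AliasedCheckLimit alias queries are charged, and the
// scan stops entirely after 2 * MaxMemDepDistance accesses, relying on the
// transitive-dependency argument from the comment in the loop.
#include <cstddef>
#include <vector>

struct Access { bool MayWrite; int Object; };

static bool mayAlias(const Access &A, const Access &B) {
  return A.Object == B.Object; // toy model of the expensive alias query
}

std::vector<std::size_t> collectMemDeps(const std::vector<Access> &Accesses,
                                        std::size_t SrcIdx,
                                        unsigned MaxMemDepDistance,
                                        unsigned AliasedCheckLimit) {
  std::vector<std::size_t> Deps;
  unsigned NumAliased = 0, DistToSrc = 1;
  for (std::size_t I = SrcIdx + 1; I < Accesses.size(); ++I) {
    const Access &Src = Accesses[SrcIdx];
    const Access &Dst = Accesses[I];
    if (DistToSrc >= MaxMemDepDistance ||
        ((Src.MayWrite || Dst.MayWrite) &&
         (NumAliased >= AliasedCheckLimit || mayAlias(Src, Dst)))) {
      ++NumAliased;      // counted only when a dependency is recorded
      Deps.push_back(I); // conservative dependency, as in the loop above
    }
    if (DistToSrc >= 2 * MaxMemDepDistance)
      break;             // transitive dependencies already cover the rest
    ++DistToSrc;
  }
  return Deps;
}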
4868
4869void BoUpSLP::BlockScheduling::resetSchedule() {
4870 assert(ScheduleStart &&
4871 "tried to reset schedule on block which has not been scheduled");
4872 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
4873 doForAllOpcodes(I, [&](ScheduleData *SD) {
4874 assert(isInSchedulingRegion(SD) &&
4875 "ScheduleData not in scheduling region");
4876 SD->IsScheduled = false;
4877 SD->resetUnscheduledDeps();
4878 });
4879 }
4880 ReadyInsts.clear();
4881}
4882
4883void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
4884 if (!BS->ScheduleStart)
4885 return;
4886
4887 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
4888
4889 BS->resetSchedule();
4890
4891 // For the real scheduling we use a more sophisticated ready-list: it is
4892 // sorted by the original instruction location. This lets the final schedule
4893 // be as close as possible to the original instruction order.
4894 struct ScheduleDataCompare {
4895 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
4896 return SD2->SchedulingPriority < SD1->SchedulingPriority;
4897 }
4898 };
4899 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
4900
4901 // Ensure that all dependency data is updated and fill the ready-list with
4902 // initial instructions.
4903 int Idx = 0;
4904 int NumToSchedule = 0;
4905 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
4906 I = I->getNextNode()) {
4907 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
4908 assert(SD->isPartOfBundle() ==
4909 (getTreeEntry(SD->Inst) != nullptr) &&
4910 "scheduler and vectorizer bundle mismatch");
4911 SD->FirstInBundle->SchedulingPriority = Idx++;
4912 if (SD->isSchedulingEntity()) {
4913 BS->calculateDependencies(SD, false, this);
4914 NumToSchedule++;
4915 }
4916 });
4917 }
4918 BS->initialFillReadyList(ReadyInsts);
4919
4920 Instruction *LastScheduledInst = BS->ScheduleEnd;
4921
4922 // Do the "real" scheduling.
4923 while (!ReadyInsts.empty()) {
4924 ScheduleData *picked = *ReadyInsts.begin();
4925 ReadyInsts.erase(ReadyInsts.begin());
4926
4927 // Move the scheduled instruction(s) to their dedicated places, if not
4928 // there yet.
4929 ScheduleData *BundleMember = picked;
4930 while (BundleMember) {
4931 Instruction *pickedInst = BundleMember->Inst;
4932 if (LastScheduledInst->getNextNode() != pickedInst) {
4933 BS->BB->getInstList().remove(pickedInst);
4934 BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
4935 pickedInst);
4936 }
4937 LastScheduledInst = pickedInst;
4938 BundleMember = BundleMember->NextInBundle;
4939 }
4940
4941 BS->schedule(picked, ReadyInsts);
4942 NumToSchedule--;
4943 }
4944 assert(NumToSchedule == 0 && "could not schedule all instructions");
4945
4946 // Avoid duplicate scheduling of the block.
4947 BS->ScheduleStart = nullptr;
4948}
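// A minimal sketch of the priority-ordered ready list used above, with a
// hypothetical Item type standing in for ScheduleData. The comparator mirrors
// ScheduleDataCompare: the entry with the highest SchedulingPriority (the
// latest original position) sorts first, which matches placing each picked
// instruction just before the previously scheduled one.
#include <cassert>
#include <set>

struct Item { int SchedulingPriority; };

struct ItemCompare {
  bool operator()(const Item *A, const Item *B) const {
    return B->SchedulingPriority < A->SchedulingPriority; // descending
  }
};

void demoReadyList() {
  Item I0{0}, I1{1}, I2{2};
  std::set<Item *, ItemCompare> Ready{&I0, &I2, &I1};
  assert((*Ready.begin())->SchedulingPriority == 2 && "latest position first");
}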
4949
4950unsigned BoUpSLP::getVectorElementSize(Value *V) const {
4951 // If V is a store, just return the width of the stored value without
4952 // traversing the expression tree. This is the common case.
4953 if (auto *Store = dyn_cast<StoreInst>(V))
4954 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
4955
4956 // If V is not a store, we can traverse the expression tree to find loads
4957 // that feed it. The type of the loaded value may indicate a more suitable
4958 // width than V's type. We want to base the vector element size on the width
4959 // of memory operations where possible.
4960 SmallVector<Instruction *, 16> Worklist;
4961 SmallPtrSet<Instruction *, 16> Visited;
4962 if (auto *I = dyn_cast<Instruction>(V))
4963 Worklist.push_back(I);
4964
4965 // Traverse the expression tree in bottom-up order looking for loads. If we
4966 // encounter an instruction we don't yet handle, we give up.
4967 auto MaxWidth = 0u;
4968 auto FoundUnknownInst = false;
4969 while (!Worklist.empty() && !FoundUnknownInst) {
4970 auto *I = Worklist.pop_back_val();
4971 Visited.insert(I);
4972
4973 // We should only be looking at scalar instructions here. If the current
4974 // instruction has a vector type, give up.
4975 auto *Ty = I->getType();
4976 if (isa<VectorType>(Ty))
4977 FoundUnknownInst = true;
4978
4979 // If the current instruction is a load, update MaxWidth to reflect the
4980 // width of the loaded value.
4981 else if (isa<LoadInst>(I))
4982 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
4983
4984 // Otherwise, we need to visit the operands of the instruction. We only
4985 // handle the interesting cases from buildTree here. If an operand is an
4986 // instruction we haven't yet visited, we add it to the worklist.
4987 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
4988 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
4989 for (Use &U : I->operands())
4990 if (auto *J = dyn_cast<Instruction>(U.get()))
4991 if (!Visited.count(J))
4992 Worklist.push_back(J);
4993 }
4994
4995 // If we don't yet handle the instruction, give up.
4996 else
4997 FoundUnknownInst = true;
4998 }
4999
5000 // If we didn't encounter a memory access in the expression tree, or if we
5001 // gave up for some reason, just return the width of V.
5002 if (!MaxWidth || FoundUnknownInst)
5003 return DL->getTypeSizeInBits(V->getType());
5004
5005 // Otherwise, return the maximum width we found.
5006 return MaxWidth;
5007}
5008
5009// Determine if a value V in a vectorizable expression Expr can be demoted to a
5010// smaller type with a truncation. We collect the values that will be demoted
5011// in ToDemote and additional roots that require investigating in Roots.
5012static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
5013 SmallVectorImpl<Value *> &ToDemote,
5014 SmallVectorImpl<Value *> &Roots) {
5015 // We can always demote constants.
5016 if (isa<Constant>(V)) {
5017 ToDemote.push_back(V);
5018 return true;
5019 }
5020
5021 // If the value is not an instruction in the expression with only one use, it
5022 // cannot be demoted.
5023 auto *I = dyn_cast<Instruction>(V);
5024 if (!I || !I->hasOneUse() || !Expr.count(I))
5025 return false;
5026
5027 switch (I->getOpcode()) {
5028
5029 // We can always demote truncations and extensions. Since truncations can
5030 // seed additional demotion, we save the truncated value.
5031 case Instruction::Trunc:
5032 Roots.push_back(I->getOperand(0));
5033 break;
5034 case Instruction::ZExt:
5035 case Instruction::SExt:
5036 break;
5037
5038 // We can demote certain binary operations if we can demote both of their
5039 // operands.
5040 case Instruction::Add:
5041 case Instruction::Sub:
5042 case Instruction::Mul:
5043 case Instruction::And:
5044 case Instruction::Or:
5045 case Instruction::Xor:
5046 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
5047 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
5048 return false;
5049 break;
5050
5051 // We can demote selects if we can demote their true and false values.
5052 case Instruction::Select: {
5053 SelectInst *SI = cast<SelectInst>(I);
5054 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
5055 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
5056 return false;
5057 break;
5058 }
5059
5060 // We can demote phis if we can demote all their incoming operands. Note that
5061 // we don't need to worry about cycles since we ensure single use above.
5062 case Instruction::PHI: {
5063 PHINode *PN = cast<PHINode>(I);
5064 for (Value *IncValue : PN->incoming_values())
5065 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
5066 return false;
5067 break;
5068 }
5069
5070 // Otherwise, conservatively give up.
5071 default:
5072 return false;
5073 }
5074
5075 // Record the value that we can demote.
5076 ToDemote.push_back(V);
5077 return true;
5078}
5079
5080void BoUpSLP::computeMinimumValueSizes() {
5081 // If there are no external uses, the expression tree must be rooted by a
5082 // store. We can't demote in-memory values, so there is nothing to do here.
5083 if (ExternalUses.empty())
5084 return;
5085
5086 // We only attempt to truncate integer expressions.
5087 auto &TreeRoot = VectorizableTree[0]->Scalars;
5088 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
5089 if (!TreeRootIT)
5090 return;
5091
5092 // If the expression is not rooted by a store, these roots should have
5093 // external uses. We will rely on InstCombine to rewrite the expression in
5094 // the narrower type. However, InstCombine only rewrites single-use values.
5095 // This means that if a tree entry other than a root is used externally, it
5096 // must have multiple uses and InstCombine will not rewrite it. The code
5097 // below ensures that only the roots are used externally.
5098 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
5099 for (auto &EU : ExternalUses)
5100 if (!Expr.erase(EU.Scalar))
5101 return;
5102 if (!Expr.empty())
5103 return;
5104
5105 // Collect the scalar values of the vectorizable expression. We will use this
5106 // context to determine which values can be demoted. If we see a truncation,
5107 // we mark it as seeding another demotion.
5108 for (auto &EntryPtr : VectorizableTree)
5109 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
5110
5111 // Ensure the roots of the vectorizable tree don't form a cycle. They must
5112 // have a single external user that is not in the vectorizable tree.
5113 for (auto *Root : TreeRoot)
5114 if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
5115 return;
5116
5117 // Conservatively determine if we can actually truncate the roots of the
5118 // expression. Collect the values that can be demoted in ToDemote and
5119 // additional roots that require investigating in Roots.
5120 SmallVector<Value *, 32> ToDemote;
5121 SmallVector<Value *, 4> Roots;
5122 for (auto *Root : TreeRoot)
5123 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
5124 return;
5125
5126 // The maximum bit width required to represent all the values that can be
5127 // demoted without loss of precision. It would be safe to truncate the roots
5128 // of the expression to this width.
5129 auto MaxBitWidth = 8u;
5130
5131 // We first check if all the bits of the roots are demanded. If they're not,
5132 // we can truncate the roots to this narrower type.
5133 for (auto *Root : TreeRoot) {
5134 auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
5135 MaxBitWidth = std::max<unsigned>(
5136 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
5137 }
5138
5139 // True if the roots can be zero-extended back to their original type, rather
5140 // than sign-extended. We know that if the leading bits are not demanded, we
5141 // can safely zero-extend. So we initialize IsKnownPositive to True.
5142 bool IsKnownPositive = true;
5143
5144 // If all the bits of the roots are demanded, we can try a little harder to
5145 // compute a narrower type. This can happen, for example, if the roots are
5146 // getelementptr indices. InstCombine promotes these indices to the pointer
5147 // width. Thus, all their bits are technically demanded even though the
5148 // address computation might be vectorized in a smaller type.
5149 //
5150 // We start by looking at each entry that can be demoted. We compute the
5151 // maximum bit width required to store the scalar by using ValueTracking to
5152 // compute the number of high-order bits we can truncate.
5153 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
5154 llvm::all_of(TreeRoot, [](Value *R) {
5155 assert(R->hasOneUse() && "Root should have only one use!");
5156 return isa<GetElementPtrInst>(R->user_back());
5157 })) {
5158 MaxBitWidth = 8u;
5159
5160 // Determine if the sign bit of all the roots is known to be zero. If not,
5161 // IsKnownPositive is set to False.
5162 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
5163 KnownBits Known = computeKnownBits(R, *DL);
5164 return Known.isNonNegative();
5165 });
5166
5167 // Determine the maximum number of bits required to store the scalar
5168 // values.
5169 for (auto *Scalar : ToDemote) {
5170 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
5171 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
5172 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
5173 }
5174
5175 // If we can't prove that the sign bit is zero, we must add one to the
5176 // maximum bit width to account for the unknown sign bit. This preserves
5177 // the existing sign bit so we can safely sign-extend the root back to the
5178 // original type. Otherwise, if we know the sign bit is zero, we will
5179 // zero-extend the root instead.
5180 //
5181 // FIXME: This is somewhat suboptimal, as there will be cases where adding
5182 // one to the maximum bit width will yield a larger-than-necessary
5183 // type. In general, we need to add an extra bit only if we can't
5184 // prove that the upper bit of the original type is equal to the
5185 // upper bit of the proposed smaller type. If these two bits are the
5186 // same (either zero or one) we know that sign-extending from the
5187 // smaller type will result in the same value. Here, since we can't
5188 // yet prove this, we are just making the proposed smaller type
5189 // larger to ensure correctness.
5190 if (!IsKnownPositive)
5191 ++MaxBitWidth;
5192 }
5193
5194 // Round MaxBitWidth up to the next power-of-two.
5195 if (!isPowerOf2_64(MaxBitWidth))
5196 MaxBitWidth = NextPowerOf2(MaxBitWidth);
5197
5198 // If the maximum bit width we compute is less than the width of the roots'
5199 // type, we can proceed with the narrowing. Otherwise, do nothing.
5200 if (MaxBitWidth >= TreeRootIT->getBitWidth())
5201 return;
5202
5203 // If we can truncate the root, we must collect additional values that might
5204 // be demoted as a result. That is, those seeded by truncations we will
5205 // modify.
5206 while (!Roots.empty())
5207 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
5208
5209 // Finally, map the values we can demote to the maximum bit width we computed.
5210 for (auto *Scalar : ToDemote)
5211 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
5212}
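// A small numeric sketch of the bit-width choice above, collapsing the two
// code paths into one hypothetical helper. Assumed inputs: i32 roots, every
// demoted scalar has 22 known sign bits (10 significant bits), and the sign
// bit cannot be proven zero.
#include <algorithm>
#include <cassert>

unsigned proposedBitWidth(unsigned NumTypeBits, unsigned NumSignBits,
                          bool IsKnownPositive) {
  unsigned MaxBitWidth = std::max(NumTypeBits - NumSignBits, 8u);
  if (!IsKnownPositive)
    ++MaxBitWidth; // keep room for the unknown sign bit
  // Round up to the next power of two, as the pass does with NextPowerOf2().
  unsigned Rounded = 1;
  while (Rounded < MaxBitWidth)
    Rounded *= 2;
  return Rounded;
}

void demoBitWidth() {
  // 32 - 22 = 10 significant bits, plus one for the sign bit, rounds to 16,
  // which is narrower than the 32-bit roots, so demotion would proceed.
  assert(proposedBitWidth(32, 22, /*IsKnownPositive=*/false) == 16);
}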
5213
5214namespace {
5215
5216/// The SLPVectorizer Pass.
5217struct SLPVectorizer : public FunctionPass {
5218 SLPVectorizerPass Impl;
5219
5220 /// Pass identification, replacement for typeid
5221 static char ID;
5222
5223 explicit SLPVectorizer() : FunctionPass(ID) {
5224 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
5225 }
5226
5227 bool doInitialization(Module &M) override {
5228 return false;
5229 }
5230
5231 bool runOnFunction(Function &F) override {
5232 if (skipFunction(F))
5233 return false;
5234
5235 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
5236 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5237 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5238 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
5239 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5240 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
5241 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5242 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5243 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
5244 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5245
5246 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5247 }
5248
5249 void getAnalysisUsage(AnalysisUsage &AU) const override {
5250 FunctionPass::getAnalysisUsage(AU);
5251 AU.addRequired<AssumptionCacheTracker>();
5252 AU.addRequired<ScalarEvolutionWrapperPass>();
5253 AU.addRequired<AAResultsWrapperPass>();
5254 AU.addRequired<TargetTransformInfoWrapperPass>();
5255 AU.addRequired<LoopInfoWrapperPass>();
5256 AU.addRequired<DominatorTreeWrapperPass>();
5257 AU.addRequired<DemandedBitsWrapperPass>();
5258 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
5259 AU.addPreserved<LoopInfoWrapperPass>();
5260 AU.addPreserved<DominatorTreeWrapperPass>();
5261 AU.addPreserved<AAResultsWrapperPass>();
5262 AU.addPreserved<GlobalsAAWrapperPass>();
5263 AU.setPreservesCFG();
5264 }
5265};
5266
5267} // end anonymous namespace
5268
5269PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
5270 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
5271 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
5272 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
5273 auto *AA = &AM.getResult<AAManager>(F);
5274 auto *LI = &AM.getResult<LoopAnalysis>(F);
5275 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
5276 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
5277 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
5278 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5279
5280 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5281 if (!Changed)
5282 return PreservedAnalyses::all();
5283
5284 PreservedAnalyses PA;
5285 PA.preserveSet<CFGAnalyses>();
5286 PA.preserve<AAManager>();
5287 PA.preserve<GlobalsAA>();
5288 return PA;
5289}
5290
5291bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
5292 TargetTransformInfo *TTI_,
5293 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
5294 LoopInfo *LI_, DominatorTree *DT_,
5295 AssumptionCache *AC_, DemandedBits *DB_,
5296 OptimizationRemarkEmitter *ORE_) {
5297 SE = SE_;
5298 TTI = TTI_;
5299 TLI = TLI_;
5300 AA = AA_;
5301 LI = LI_;
5302 DT = DT_;
5303 AC = AC_;
5304 DB = DB_;
5305 DL = &F.getParent()->getDataLayout();
5306
5307 Stores.clear();
5308 GEPs.clear();
5309 bool Changed = false;
5310
5311 // If the target claims to have no vector registers don't attempt
5312 // vectorization.
5313 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
5314 return false;
5315
5316 // Don't vectorize when the attribute NoImplicitFloat is used.
5317 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
5318 return false;
5319
5320 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
5321
5322 // Use the bottom up slp vectorizer to construct chains that start with
5323 // store instructions.
5324 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
5325
5326 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
5327 // delete instructions.
5328
5329 // Scan the blocks in the function in post order.
5330 for (auto BB : post_order(&F.getEntryBlock())) {
5331 collectSeedInstructions(BB);
5332
5333 // Vectorize trees that end at stores.
5334 if (!Stores.empty()) {
5335 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
5336 << " underlying objects.\n");
5337 Changed |= vectorizeStoreChains(R);
5338 }
5339
5340 // Vectorize trees that end at reductions.
5341 Changed |= vectorizeChainsInBlock(BB, R);
5342
5343 // Vectorize the index computations of getelementptr instructions. This
5344 // is primarily intended to catch gather-like idioms ending at
5345 // non-consecutive loads.
5346 if (!GEPs.empty()) {
5347 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
5348 << " underlying objects.\n");
5349 Changed |= vectorizeGEPIndices(BB, R);
5350 }
5351 }
5352
5353 if (Changed) {
5354 R.optimizeGatherSequence();
5355 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
5356 LLVM_DEBUG(verifyFunction(F));
5357 }
5358 return Changed;
5359}
5360
5361bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
5362 unsigned VecRegSize) {
5363 const unsigned ChainLen = Chain.size();
5364 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
5365 << "\n");
5366 const unsigned Sz = R.getVectorElementSize(Chain[0]);
5367 const unsigned VF = VecRegSize / Sz;
5368
5369 if (!isPowerOf2_32(Sz) || VF < 2)
5370 return false;
5371
5372 bool Changed = false;
5373 // Look for profitable vectorizable trees at all offsets, starting at zero.
5374 for (unsigned i = 0, e = ChainLen; i + VF <= e; ++i) {
5375
5376 ArrayRef<Value *> Operands = Chain.slice(i, VF);
5377 // Check that a previous iteration of this loop did not delete the Value.
5378 if (llvm::any_of(Operands, [&R](Value *V) {
5379 auto *I = dyn_cast<Instruction>(V);
5380 return I && R.isDeleted(I);
5381 }))
5382 continue;
5383
5384 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
5385 << "\n");
5386
5387 R.buildTree(Operands);
5388 if (R.isTreeTinyAndNotFullyVectorizable())
5389 continue;
5390
5391 R.computeMinimumValueSizes();
5392
5393 int Cost = R.getTreeCost();
5394
5395 LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF
5396 << "\n");
5397 if (Cost < -SLPCostThreshold) {
5398 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
5399
5400 using namespace ore;
5401
5402 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
5403 cast<StoreInst>(Chain[i]))
5404 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
5405 << " and with tree size "
5406 << NV("TreeSize", R.getTreeSize()));
5407
5408 R.vectorizeTree();
5409
5410 // Move to the next bundle.
5411 i += VF - 1;
5412 Changed = true;
5413 }
5414 }
5415
5416 return Changed;
5417}
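// A small sketch of how the store chain above is sliced; the numbers are
// assumptions for illustration only. With a 128-bit vector register and
// 32-bit stored elements, VF = 128 / 32 = 4, so a chain of 10 stores is
// examined in windows of 4 at offsets 0 through 6.
#include <cassert>

unsigned storeChainVF(unsigned VecRegSizeInBits, unsigned ElemSizeInBits) {
  return VecRegSizeInBits / ElemSizeInBits;
}

void demoStoreChainWindows() {
  const unsigned ChainLen = 10;
  const unsigned VF = storeChainVF(128, 32);
  assert(VF == 4);
  unsigned Windows = 0;
  for (unsigned i = 0; i + VF <= ChainLen; ++i) // same bound as the loop above
    ++Windows;
  assert(Windows == 7); // offsets 0..6
}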
5418
5419bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
5420 BoUpSLP &R) {
5421 SetVector<StoreInst *> Heads;
5422 SmallDenseSet<StoreInst *> Tails;
5423 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
5424
5425 // We may run into multiple chains that merge into a single chain. We mark the
5426 // stores that we vectorized so that we don't visit the same store twice.
5427 BoUpSLP::ValueSet VectorizedStores;
5428 bool Changed = false;
5429
5430 auto &&FindConsecutiveAccess =
5431 [this, &Stores, &Heads, &Tails, &ConsecutiveChain] (int K, int Idx) {
5432 if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE))
5433 return false;
5434
5435 Tails.insert(Stores[Idx]);
5436 Heads.insert(Stores[K]);
5437 ConsecutiveChain[Stores[K]] = Stores[Idx];
5438 return true;
5439 };
5440
5441 // Do a quadratic search on all of the given stores in reverse order and find
5442 // all of the pairs of stores that follow each other.
5443 int E = Stores.size();
5444 for (int Idx = E - 1; Idx >= 0; --Idx) {
5445 // If a store has multiple consecutive store candidates, search according
5446 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
5447 // This is because pairing with the immediately succeeding or preceding
5448 // candidate usually gives the best chance to find an SLP vectorization opportunity.
5449 for (int Offset = 1, F = std::max(E - Idx, Idx + 1); Offset < F; ++Offset)
5450 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
5451 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
5452 break;
5453 }
5454
5455 // For stores that start but don't end a link in the chain:
5456 for (auto *SI : llvm::reverse(Heads)) {
5457 if (Tails.count(SI))
5458 continue;
5459
5460 // We found a store instr that starts a chain. Now follow the chain and try
5461 // to vectorize it.
5462 BoUpSLP::ValueList Operands;
5463 StoreInst *I = SI;
5464 // Collect the chain into a list.
5465 while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) {
5466 Operands.push_back(I);
5467 // Move to the next value in the chain.
5468 I = ConsecutiveChain[I];
5469 }
5470
5471 // FIXME: Is division-by-2 the correct step? Should we assert that the
5472 // register size is a power-of-2?
5473 for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
5474 Size /= 2) {
5475 if (vectorizeStoreChain(Operands, R, Size)) {
5476 // Mark the vectorized stores so that we don't vectorize them again.
5477 VectorizedStores.insert(Operands.begin(), Operands.end());
5478 Changed = true;
5479 break;
5480 }
5481 }
5482 }
5483
5484 return Changed;
5485}
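// A standalone sketch of the pairing order used in the quadratic search
// above: neighbours of Idx are probed as Idx-1, Idx+1, Idx-2, Idx+2, ...
// (the real loop stops at the first consecutive pair it finds; this helper
// only records the visiting order). pairingOrder is a hypothetical name.
#include <algorithm>
#include <cassert>
#include <vector>

std::vector<int> pairingOrder(int Idx, int E) {
  std::vector<int> Order;
  for (int Offset = 1, F = std::max(E - Idx, Idx + 1); Offset < F; ++Offset) {
    if (Idx >= Offset)
      Order.push_back(Idx - Offset);
    if (Idx + Offset < E)
      Order.push_back(Idx + Offset);
  }
  return Order;
}

void demoPairingOrder() {
  // For Idx = 2 among 5 stores, candidates are visited as 1, 3, 0, 4.
  assert((pairingOrder(2, 5) == std::vector<int>{1, 3, 0, 4}));
}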
5486
5487void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
5488 // Initialize the collections. We will make a single pass over the block.
5489 Stores.clear();
5490 GEPs.clear();
5491
5492 // Visit the store and getelementptr instructions in BB and organize them in
5493 // Stores and GEPs according to the underlying objects of their pointer
5494 // operands.
5495 for (Instruction &I : *BB) {
5496 // Ignore store instructions that are volatile or have a pointer operand
5497 // that doesn't point to a scalar type.
5498 if (auto *SI = dyn_cast<StoreInst>(&I)) {
5499 if (!SI->isSimple())
5500 continue;
5501 if (!isValidElementType(SI->getValueOperand()->getType()))
5502 continue;
5503 Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
5504 }
5505
5506 // Ignore getelementptr instructions that have more than one index, a
5507 // constant index, or a pointer operand that doesn't point to a scalar
5508 // type.
5509 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
5510 auto Idx = GEP->idx_begin()->get();
5511 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
5512 continue;
5513 if (!isValidElementType(Idx->getType()))
5514 continue;
5515 if (GEP->getType()->isVectorTy())
5516 continue;
5517 GEPs[GEP->getPointerOperand()].push_back(GEP);
5518 }
5519 }
5520}
5521
5522bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
5523 if (!A || !B)
5524 return false;
5525 Value *VL[] = { A, B };
5526 return tryToVectorizeList(VL, R, /*UserCost=*/0, true);
5527}
5528
5529bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
5530 int UserCost, bool AllowReorder) {
5531 if (VL.size() < 2)
5532 return false;
5533
5534 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
5535 << VL.size() << ".\n");
5536
5537 // Check that all of the parts are scalar instructions of the same type,
5538 // we permit an alternate opcode via InstructionsState.
5539 InstructionsState S = getSameOpcode(VL);
5540 if (!S.getOpcode())
5541 return false;
5542
5543 Instruction *I0 = cast<Instruction>(S.OpValue);
5544 unsigned Sz = R.getVectorElementSize(I0);
5545 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
5546 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
5547 if (MaxVF < 2) {
5548 R.getORE()->emit([&]() {
5549 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
5550 << "Cannot SLP vectorize list: vectorization factor "
5551 << "less than 2 is not supported";
5552 });
5553 return false;
5554 }
5555
5556 for (Value *V : VL) {
5557 Type *Ty = V->getType();
5558 if (!isValidElementType(Ty)) {
5559 // NOTE: the following will give user internal llvm type name, which may
5560 // not be useful.
5561 R.getORE()->emit([&]() {
5562 std::string type_str;
5563 llvm::raw_string_ostream rso(type_str);
5564 Ty->print(rso);
5565 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
5566 << "Cannot SLP vectorize list: type "
5567 << rso.str() + " is unsupported by vectorizer";
5568 });
5569 return false;
5570 }
5571 }
5572
5573 bool Changed = false;
5574 bool CandidateFound = false;
5575 int MinCost = SLPCostThreshold;
5576
5577 unsigned NextInst = 0, MaxInst = VL.size();
5578 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
5579 // No actual vectorization should happen, if number of parts is the same as
5580 // provided vectorization factor (i.e. the scalar type is used for vector
5581 // code during codegen).
5582 auto *VecTy = VectorType::get(VL[0]->getType(), VF);
5583 if (TTI->getNumberOfParts(VecTy) == VF)
5584 continue;
5585 for (unsigned I = NextInst; I < MaxInst; ++I) {
5586 unsigned OpsWidth = 0;
5587
5588 if (I + VF > MaxInst)
5589 OpsWidth = MaxInst - I;
5590 else
5591 OpsWidth = VF;
5592
5593 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
5594 break;
5595
5596 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
5597 // Check that a previous iteration of this loop did not delete the Value.
5598 if (llvm::any_of(Ops, [&R](Value *V) {
5599 auto *I = dyn_cast<Instruction>(V);
5600 return I && R.isDeleted(I);
5601 }))
5602 continue;
5603
5604 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
5605 << "\n");
5606
5607 R.buildTree(Ops);
5608 Optional<ArrayRef<unsigned>> Order = R.bestOrder();