File: lib/Transforms/Vectorize/SLPVectorizer.cpp
Location: line 2819, column 22
Description: Called C++ object pointer is null
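
This diagnostic class means the analyzer found an execution path on which a member function is called through a null object pointer. The flagged line (2819) lies beyond the excerpt below, but in this file such paths typically come from dereferencing the result of LLVM's dyn_cast<>, which returns null when the dynamic type check fails. A minimal, hypothetical sketch of the pattern and its guarded form (names are illustrative only):

    // Unsafe: dyn_cast yields nullptr if VL[0] is not an Instruction.
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Opcode = I0->getOpcode(); // analyzer: called C++ object pointer is null

    // Guarded form: the body only runs when the cast succeeded.
    if (auto *I = dyn_cast<Instruction>(VL[0])) {
      unsigned Opcode = I->getOpcode(); // safe: I is non-null here
      (void)Opcode;
    }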
//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <memory>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

namespace {

// FIXME: Set this via cl::opt to allow overriding.
static const unsigned RecursionMaxDepth = 12;

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

/// \returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns ShuffleVector instruction if instructions in \p VL have an
/// alternating fadd,fsub / fsub,fadd / add,sub / sub,add sequence
/// (i.e. opcodes of the form fadd,fsub,fadd,fsub,...).
static unsigned isAltInst(ArrayRef<Value *> VL) {
  // The caller (getSameOpcode) has already established that VL[0] is an
  // Instruction, so cast<> documents that invariant instead of leaving an
  // unchecked dyn_cast result that could be dereferenced as null.
  Instruction *I0 = cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}
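
// Editor's illustration (not part of the original file): the alternating
// opcode sequence recognized above corresponds to scalar source like the
// following, where even lanes add and odd lanes subtract. After SLP builds
// the two vector operands, a single shufflevector picks fadd results for
// even lanes and fsub results for odd lanes (or the target emits an addsub
// instruction such as x86's ADDSUBPD, if it has one).
static void altOpcodeExample(double *A, const double *X, const double *Y) {
  A[0] = X[0] + Y[0]; // lane 0: fadd
  A[1] = X[1] - Y[1]; // lane 1: fsub
  A[2] = X[2] + Y[2]; // lane 2: fadd
  A[3] = X[3] - Y[3]; // lane 3: fsub
}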

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
    if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
      // Intersection is initialized to the 0th scalar,
      // so start counting from index '1'.
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
          Intersection->andIRFlags(Scalar);
      }
      VecOp->copyIRFlags(Intersection);
    }
  }
}
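
// Editor's sketch (a simplified analogue, not the LLVM API; WrapFlags and
// intersectWrapFlags are hypothetical names): the vector operation may only
// keep a flag such as nsw if every scalar it replaces carried that flag, so
// propagateIRFlags effectively and-s the flags across all lanes.
struct WrapFlags { bool NSW = false; bool NUW = false; };
static WrapFlags intersectWrapFlags(ArrayRef<WrapFlags> Lanes) {
  WrapFlags Result = Lanes[0];
  for (size_t i = 1, e = Lanes.size(); i != e; ++i) {
    Result.NSW &= Lanes[i].NSW; // drop nsw if any lane lacks it
    Result.NUW &= Lanes[i].NUW; // drop nuw if any lane lacks it
  }
  return Result;
}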

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_nontemporal:
        MD = MDNode::intersect(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if in-tree use also needs extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {

  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
    // Otherwise fall through to the default case.
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
        DL(DL), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    MinVecRegSize = MinVectorRegSizeOption;
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// Reorder commutative operands in alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// Reorder commutative operands to get better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);
  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.emplace_back();
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // The user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {

    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
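
  // Editor's sketch (not part of the original class): AliasCache memoizes an
  // expensive pairwise query so AliasAnalysis is consulted at most once per
  // (Inst1, Inst2) pair. The shape of the lookup, in miniature:
  //
  //   Optional<bool> &Slot = Cache[std::make_pair(A, B)]; // empty on a miss
  //   if (!Slot.hasValue())
  //     Slot = expensiveQuery(A, B); // compute once, reuse on later calls
  //   return Slot.getValue();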

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. It consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    ///
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equal to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled;
  };
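
  // Editor's sketch (simplified, not part of the original file): the counters
  // above implement dependency-counting ("Kahn-style") scheduling. Ignoring
  // bundles and memory dependencies, the core step on scheduling a node is:
  //
  //   for (ScheduleData *User : usersOf(N))        // usersOf is hypothetical
  //     if (User->incrementUnscheduledDeps(-1) == 0)
  //       ReadyList.insert(User->FirstInBundle);   // all deps now satisfied
  //
  // Mirroring each decrement into the bundle head's UnscheduledDepsInBundle
  // makes a multi-instruction bundle become ready only when every member's
  // dependencies have been scheduled.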

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &os,
                                 const BoUpSLP::ScheduleData &SD);
#endif

  /// Contains all scheduling data for a basic block.
  ///
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load-bundles, which contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load-bundles of size 2, which are consecutive loads if reversed.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented.
  MapVector<Value *, uint64_t> MinBWs;
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
  SD.dump(os);
  return os;
}
#endif

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}
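
// Editor's illustration (not part of the original file): an "external use" is
// a scalar inside the tree whose user lives outside it. For example, if two
// loads are vectorized into one <2 x double> load but lane 1 also feeds a
// scalar user, vectorizeTree() must emit an extractelement for that lane; the
// (Scalar, User, Lane) triples pushed onto ExternalUses above record exactly
// where such extracts are needed. In IR terms (hypothetical values):
//
//   %v  = load <2 x double>, <2 x double>* %p     ; vectorized lanes 0..1
//   %s1 = extractelement <2 x double> %v, i32 1   ; lane 1 for the scalar user
//   ret double %s1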
1014 | ||||
1015 | ||||
1016 | void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { | |||
1017 | bool SameTy = getSameType(VL); (void)SameTy; | |||
1018 | bool isAltShuffle = false; | |||
1019 | assert(SameTy && "Invalid types!")((SameTy && "Invalid types!") ? static_cast<void> (0) : __assert_fail ("SameTy && \"Invalid types!\"", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Vectorize/SLPVectorizer.cpp" , 1019, __PRETTY_FUNCTION__)); | |||
1020 | ||||
1021 | if (Depth == RecursionMaxDepth) { | |||
1022 | DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Gathering due to max recursion depth.\n" ; } } while (0); | |||
1023 | newTreeEntry(VL, false); | |||
1024 | return; | |||
1025 | } | |||
1026 | ||||
1027 | // Don't handle vectors. | |||
1028 | if (VL[0]->getType()->isVectorTy()) { | |||
1029 | DEBUG(dbgs() << "SLP: Gathering due to vector type.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Gathering due to vector type.\n" ; } } while (0); | |||
1030 | newTreeEntry(VL, false); | |||
1031 | return; | |||
1032 | } | |||
1033 | ||||
1034 | if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) | |||
1035 | if (SI->getValueOperand()->getType()->isVectorTy()) { | |||
1036 | DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Gathering due to store vector type.\n" ; } } while (0); | |||
1037 | newTreeEntry(VL, false); | |||
1038 | return; | |||
1039 | } | |||
1040 | unsigned Opcode = getSameOpcode(VL); | |||
1041 | ||||
1042 | // Check that this shuffle vector refers to the alternate | |||
1043 | // sequence of opcodes. | |||
1044 | if (Opcode == Instruction::ShuffleVector) { | |||
1045 | Instruction *I0 = dyn_cast<Instruction>(VL[0]); | |||
1046 | unsigned Op = I0->getOpcode(); | |||
1047 | if (Op != Instruction::ShuffleVector) | |||
1048 | isAltShuffle = true; | |||
1049 | } | |||
1050 | ||||
1051 | // If all of the operands are identical or constant we have a simple solution. | |||
1052 | if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) { | |||
1053 | DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Gathering due to C,S,B,O. \n" ; } } while (0); | |||
1054 | newTreeEntry(VL, false); | |||
1055 | return; | |||
1056 | } | |||
1057 | ||||
1058 | // We now know that this is a vector of instructions of the same type from | |||
1059 | // the same block. | |||
1060 | ||||
1061 | // Don't vectorize ephemeral values. | |||
1062 | for (unsigned i = 0, e = VL.size(); i != e; ++i) { | |||
1063 | if (EphValues.count(VL[i])) { | |||
1064 | DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: The instruction (" << * VL[i] << ") is ephemeral.\n"; } } while (0) | |||
1065 | ") is ephemeral.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: The instruction (" << * VL[i] << ") is ephemeral.\n"; } } while (0); | |||
1066 | newTreeEntry(VL, false); | |||
1067 | return; | |||
1068 | } | |||
1069 | } | |||
1070 | ||||
1071 | // Check if this is a duplicate of another entry. | |||
1072 | if (ScalarToTreeEntry.count(VL[0])) { | |||
1073 | int Idx = ScalarToTreeEntry[VL[0]]; | |||
1074 | TreeEntry *E = &VectorizableTree[Idx]; | |||
1075 | for (unsigned i = 0, e = VL.size(); i != e; ++i) { | |||
1076 | DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n"; } } while (0); | |||
1077 | if (E->Scalars[i] != VL[i]) { | |||
1078 | DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Gathering due to partial overlap.\n" ; } } while (0); | |||
1079 | newTreeEntry(VL, false); | |||
1080 | return; | |||
1081 | } | |||
1082 | } | |||
1083 | DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n"; } } while (0); | |||
1084 | return; | |||
1085 | } | |||
1086 | ||||
1087 | // Check that none of the instructions in the bundle are already in the tree. | |||
1088 | for (unsigned i = 0, e = VL.size(); i != e; ++i) { | |||
1089 | if (ScalarToTreeEntry.count(VL[i])) { | |||
1090 | DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: The instruction (" << * VL[i] << ") is already in tree.\n"; } } while (0) | |||
1091 | ") is already in tree.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: The instruction (" << * VL[i] << ") is already in tree.\n"; } } while (0); | |||
1092 | newTreeEntry(VL, false); | |||
1093 | return; | |||
1094 | } | |||
1095 | } | |||
1096 | ||||
1097 | // If any of the scalars is marked as a value that needs to stay scalar then | |||
1098 | // we need to gather the scalars. | |||
1099 | for (unsigned i = 0, e = VL.size(); i != e; ++i) { | |||
1100 | if (MustGather.count(VL[i])) { | |||
1101 | DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Gathering due to gathered scalar.\n" ; } } while (0); | |||
1102 | newTreeEntry(VL, false); | |||
1103 | return; | |||
1104 | } | |||
1105 | } | |||
1106 | ||||
1107 | // Check that all of the users of the scalars that we want to vectorize are | |||
1108 | // schedulable. | |||
1109 | Instruction *VL0 = cast<Instruction>(VL[0]); | |||
1110 | BasicBlock *BB = cast<Instruction>(VL0)->getParent(); | |||
1111 | ||||
1112 | if (!DT->isReachableFromEntry(BB)) { | |||
1113 | // Don't go into unreachable blocks. They may contain instructions with | |||
1114 | // dependency cycles which confuse the final scheduling. | |||
1115 | DEBUG(dbgs() << "SLP: bundle in unreachable block.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: bundle in unreachable block.\n" ; } } while (0); | |||
1116 | newTreeEntry(VL, false); | |||
1117 | return; | |||
1118 | } | |||
1119 | ||||
1120 | // Check that every instructions appears once in this bundle. | |||
1121 | for (unsigned i = 0, e = VL.size(); i < e; ++i) | |||
1122 | for (unsigned j = i+1; j < e; ++j) | |||
1123 | if (VL[i] == VL[j]) { | |||
1124 | DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Scalar used twice in bundle.\n" ; } } while (0); | |||
1125 | newTreeEntry(VL, false); | |||
1126 | return; | |||
1127 | } | |||

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL[0]) ||
            !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          TerminatorInst *Term = dyn_cast<TerminatorInst>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(
                  PH->getIncomingBlock(i)));
          if (Term) {
            DEBUG(dbgs()
                  << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
            BS.cancelScheduling(VL);
            newTreeEntry(VL, false);
            return;
          }
        }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      bool Reuse = canReuseExtract(VL, Opcode);
      if (Reuse) {
        DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
      } else {
        BS.cancelScheduling(VL);
      }
      newTreeEntry(VL, Reuse);
      return;
    }
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load.
      // For example, we don't want to vectorize loads that are smaller than
      // 8 bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
      // treats loading/storing it as an i8 struct. If we vectorize
      // loads/stores from such a struct we read/write packed bits disagreeing
      // with the unvectorized version.
      Type *ScalarTy = VL[0]->getType();

      if (DL->getTypeSizeInBits(ScalarTy) !=
          DL->getTypeAllocSizeInBits(ScalarTy)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
        return;
      }
      // Check if the loads are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
        LoadInst *L = cast<LoadInst>(VL[i]);
        if (!L->isSimple()) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
          return;
        }

        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
            ++NumLoadsWantToChangeOrder;
          }
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
          return;
        }
      }
      ++NumLoadsWantToKeepOrder;
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
      return;
    }
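
    // Illustrative IR (added commentary, not part of the original source):
    // a bundle of simple, consecutive i32 loads such as
    //   %l0 = load i32, i32* %p0   ; %p1..%p3 address the next elements
    //   %l1 = load i32, i32* %p1
    //   %l2 = load i32, i32* %p2
    //   %l3 = load i32, i32* %p3
    // passes the checks above and can later be emitted as one <4 x i32> load.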
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      for (unsigned i = 0; i < VL.size(); ++i) {
        Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
        if (Ty != SrcTy || !isValidElementType(Ty)) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
          return;
        }
      }
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of casts.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Check that all of the compares have the same predicate.
      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
      for (unsigned i = 1, e = VL.size(); i < e; ++i) {
        CmpInst *Cmp = cast<CmpInst>(VL[i]);
        if (Cmp->getPredicate() != P0 ||
            Cmp->getOperand(0)->getType() != ComparedTy) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
          return;
        }
      }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of compares.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::Select:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

      // Sort operands of the instructions so that each side is more likely to
      // have the same opcode.
      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(VL, Left, Right);
        buildTree_rec(Left, Depth + 1);
        buildTree_rec(Right, Depth + 1);
        return;
      }

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::GetElementPtr: {
      // We don't combine GEPs with complicated (nested) indexing.
      for (unsigned j = 0; j < VL.size(); ++j) {
        if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

      // We can't combine several GEPs into one vector if they operate on
      // different types.
      Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
      for (unsigned j = 0; j < VL.size(); ++j) {
        Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
        if (Ty0 != CurTy) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

      // We don't combine GEPs with non-constant indexes.
      for (unsigned j = 0; j < VL.size(); ++j) {
        auto Op = cast<Instruction>(VL[j])->getOperand(1);
        if (!isa<ConstantInt>(Op)) {
          DEBUG(
              dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      for (unsigned i = 0, e = 2; i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
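
    // Illustrative IR (added commentary, not part of the original source):
    // the checks above accept GEP bundles with a single constant index over
    // one pointer type, e.g.
    //   %g0 = getelementptr inbounds i32, i32* %base, i64 0
    //   %g1 = getelementptr inbounds i32, i32* %base, i64 1
    // while a GEP with a variable index, or with more than one index, is
    // gathered instead.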
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
          return;
        }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of stores.\n");

      ValueList Operands;
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

      buildTree_rec(Operands, Depth + 1);
      return;
    }
    case Instruction::Call: {
      // Check if the calls are all to the same vectorizable intrinsic.
      CallInst *CI = cast<CallInst>(VL[0]);
      // Check if this is an Intrinsic call or something that can be
      // represented by an intrinsic call.
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (!isTriviallyVectorizable(ID)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
        return;
      }
      Function *Int = CI->getCalledFunction();
      Value *A1I = nullptr;
      if (hasVectorInstrinsicScalarOpd(ID, 1))
        A1I = CI->getArgOperand(1);
      for (unsigned i = 1, e = VL.size(); i != e; ++i) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
        if (!CI2 || CI2->getCalledFunction() != Int ||
            getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
            !CI->hasIdenticalOperandBundleSchema(*CI2)) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                       << "\n");
          return;
        }
        // ctlz, cttz and powi are special intrinsics whose second argument
        // should be the same in order for them to be vectorized.
        if (hasVectorInstrinsicScalarOpd(ID, 1)) {
          Value *A1J = CI2->getArgOperand(1);
          if (A1I != A1J) {
            BS.cancelScheduling(VL);
            newTreeEntry(VL, false);
            DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                         << " argument " << A1I << "!=" << A1J << "\n");
            return;
          }
        }
        // Verify that the bundle operands are identical between the two calls.
        if (CI->hasOperandBundles() &&
            !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                        CI->op_begin() + CI->getBundleOperandsEndIndex(),
                        CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                       << "!=" << *VL[i] << '\n');
          return;
        }
      }

      newTreeEntry(VL, true);
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j) {
          CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
          Operands.push_back(CI2->getArgOperand(i));
        }
        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
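
    // Illustrative note (added commentary, not part of the original source):
    // a bundle of matching intrinsic calls, e.g. four calls to
    // @llvm.sqrt.f32 whose arguments form a vectorizable bundle, is accepted
    // here and can later become a single @llvm.sqrt.v4f32 on <4 x float>.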
    case Instruction::ShuffleVector: {
      // If this is not an alternate sequence of opcode like add-sub
      // then do not vectorize this instruction.
      if (!isAltShuffle) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
        return;
      }
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

      // Reorder operands if reordering would enable vectorization.
      if (isa<BinaryOperator>(VL0)) {
        ValueList Left, Right;
        reorderAltShuffleOperands(VL, Left, Right);
        buildTree_rec(Left, Depth + 1);
        buildTree_rec(Right, Depth + 1);
        return;
      }

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    default:
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
      return;
  }
}

unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  unsigned N;
  Type *EltTy;
  auto *ST = dyn_cast<StructType>(T);
  if (ST) {
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
  } else {
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  }
  if (!isValidElementType(EltTy))
    return 0;
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
    return 0;
  if (ST) {
    // Check that struct is homogeneous.
    for (const auto *Ty : ST->elements())
      if (Ty != EltTy)
        return 0;
  }
  return N;
}
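
// Illustrative note (added commentary, not part of the original source): for
// a homogeneous aggregate such as { double, double }, canMapToVector returns
// 2 when <2 x double> fits in the allowed vector-register size range and its
// store size matches the aggregate's; a mixed struct like { double, i32 }
// returns 0.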

bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  Instruction *E0 = cast<Instruction>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from a vector/aggregate with the same number of
  // elements.
  unsigned NElts;
  if (Opcode == Instruction::ExtractValue) {
    const DataLayout &DL = E0->getModule()->getDataLayout();
    NElts = canMapToVector(Vec->getType(), DL);
    if (!NElts)
      return false;
    // Check if load can be rewritten as load of vector.
    LoadInst *LI = dyn_cast<LoadInst>(Vec);
    if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
      return false;
  } else {
    NElts = Vec->getType()->getVectorNumElements();
  }

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  if (!matchExtractIndex(E0, 0, Opcode))
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    Instruction *E = cast<Instruction>(VL[i]);
    if (!matchExtractIndex(E, i, Opcode))
      return false;
    if (E->getOperand(0) != Vec)
      return false;
  }

  return true;
}
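
// Illustrative IR (added commentary, not part of the original source): the
// bundle
//   %e0 = extractelement <4 x float> %v, i32 0
//   %e1 = extractelement <4 x float> %v, i32 1
//   %e2 = extractelement <4 x float> %v, i32 2
//   %e3 = extractelement <4 x float> %v, i32 3
// reads every lane of one source vector at the matching offsets, so the
// vectorized tree can reuse %v directly instead of rebuilding it.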

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
  if (MinBWs.count(VL[0]))
    VecTy = VectorType::get(IntegerType::get(F->getContext(), MinBWs[VL[0]]),
                            VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
    case Instruction::PHI: {
      return 0;
    }
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      if (canReuseExtract(VL, Opcode)) {
        int DeadCost = 0;
        for (unsigned i = 0, e = VL.size(); i < e; ++i) {
          Instruction *E = cast<Instruction>(VL[i]);
          if (E->hasOneUse())
            // Take credit for instruction that will become dead.
            DeadCost +=
                TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
        }
        return -DeadCost;
      }
      return getGatherCost(VecTy);
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();

      // Calculate the cost of this instruction.
      int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                         VL0->getType(), SrcTy);

      VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
      int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
      return VecCost - ScalarCost;
    }
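
    // Worked example (added commentary, not part of the original source):
    // for four scalar "sext i8 %x to i32" instructions, ScalarCost is four
    // times the scalar sext cost, while VecCost prices one
    // "sext <4 x i8> to <4 x i32>"; a negative difference means the vector
    // form is cheaper.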
    case Instruction::FCmp:
    case Instruction::ICmp:
    case Instruction::Select: {
      // Calculate the cost of this instruction.
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      int ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
      return VecCost - ScalarCost;
    }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_None;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }
      // FIXME: Currently the cost model modification for division by a power
      // of 2 is only handled for X86 and AArch64. Add support for other
      // targets.
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
          CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;

      int ScalarCost = VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK,
                                      Op2VK, Op1VP, Op2VP);
      int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                                Op1VP, Op2VP);
      return VecCost - ScalarCost;
    }
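
    // Worked example (added commentary, not part of the original source):
    // for a bundle of four "udiv i32 %x, 4" instructions every second operand
    // is the same ConstantInt, so Op2VK stays OK_UniformConstantValue and
    // Op2VP becomes OP_PowerOf2, letting targets that lower such divisions to
    // shifts report a cheaper cost. Mixed constants would downgrade Op2VK to
    // OK_NonUniformConstantValue, and any non-constant operand to OK_AnyValue.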
    case Instruction::GetElementPtr: {
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      int ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
      int VecCost =
          TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

      return VecCost - ScalarCost;
    }
    case Instruction::Load: {
      // Cost of wide load - cost of scalar loads.
      int ScalarLdCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
      int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
      return VecLdCost - ScalarLdCost;
    }
    case Instruction::Store: {
      // We know that we can merge the stores. Calculate the cost.
      int ScalarStCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
      int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
      return VecStCost - ScalarStCost;
    }
    case Instruction::Call: {
      CallInst *CI = cast<CallInst>(VL0);
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

      // Calculate the cost of the scalar and vector calls.
      SmallVector<Type*, 4> ScalarTys, VecTys;
      for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
        ScalarTys.push_back(CI->getArgOperand(op)->getType());
        VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                         VecTy->getNumElements()));
      }

      FastMathFlags FMF;
      if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
        FMF = FPMO->getFastMathFlags();

      int ScalarCallCost = VecTy->getNumElements() *
          TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);

      int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF);

      DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                   << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                   << " for " << *CI << "\n");

      return VecCallCost - ScalarCallCost;
    }
    case Instruction::ShuffleVector: {
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_AnyValue;
      int ScalarCost = 0;
      int VecCost = 0;
      for (unsigned i = 0; i < VL.size(); ++i) {
        Instruction *I = cast<Instruction>(VL[i]);
        if (!I)
          break;
        ScalarCost +=
            TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
      }
      // VecCost is equal to sum of the cost of creating 2 vectors
      // and the cost of creating shuffle.
      Instruction *I0 = cast<Instruction>(VL[0]);
      VecCost =
          TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
      Instruction *I1 = cast<Instruction>(VL[1]);
      VecCost +=
          TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
      VecCost +=
          TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
      return VecCost - ScalarCost;
    }
    default:
      llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable .\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0].NeedToGather &&
      (allConstant(VectorizableTree[1].Scalars) ||
       isSplat(VectorizableTree[1].Scalars)))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}
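
// Illustrative note (added commentary, not part of the original source): a
// typical fully vectorizable tiny tree is a bundle of consecutive stores of
// the same constant, e.g. a[0..3] = 0, where entry 0 is the vectorizable
// store bundle and entry 1 is the all-constant operand bundle.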

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction*, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (unsigned N = 0; N < VectorizableTree.size(); ++N) {
    Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    DEBUG(
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
      );

    // Now find the sequence of instructions between PrevInst and Inst.
    BasicBlock::reverse_iterator InstIt(Inst->getIterator()),
        PrevInstIt(PrevInst->getIterator());
    --PrevInstIt;
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
        SmallVector<Type*, 4> V;
        for (auto *II : LiveValues)
          V.push_back(VectorType::get(II->getType(), BundleWidth));
        Cost += TTI->getCostOfKeepingLiveOverCall(V);
      }

      ++PrevInstIt;
    }

    PrevInst = Inst;
  }

  return Cost;
}
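
// Illustrative note (added commentary, not part of the original source): if a
// call such as "call void @f()" sits between two tree instructions while one
// bundle's values are still live, the loop above charges
// TTI->getCostOfKeepingLiveOverCall for the corresponding vector types,
// modeling the spills and fills a vector register may need around the call.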

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (VectorizableTree.empty()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (TreeEntry &TE : VectorizableTree) {
    int C = getEntryCost(&TE);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
                 << *TE.Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0].Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]);
      VecTy = VectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(
          Instruction::SExt, EU.Scalar->getType(), VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  int SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;

  DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n"
               << "SLP: Extract Cost = " << ExtractCost << ".\n"
               << "SLP: Total Cost = " << Cost << ".\n");
  return Cost;
}
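
// Worked example (added commentary, not part of the original source, with
// made-up costs): if the entry costs sum to -4, one externally used scalar
// adds an extract cost of 1, and no calls force spills, getTreeCost returns
// -4 + 1 + 0 = -3; a negative total means the vectorized form is expected to
// be cheaper than the scalar code.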

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}
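
// Illustrative note (added commentary, not part of the original source):
// gathering four i32 scalars into a <4 x i32> is priced as the sum of the
// four insertelement costs for lanes 0..3, which is exactly what the loop in
// getGatherCost(Type *) computes.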

// Reorder commutative operations in alternate shuffle if the resulting vectors
// are consecutive loads. This would allow us to vectorize the tree.
// If we have something like-
// load a[0] - load b[0]
// load b[1] + load a[1]
// load a[2] - load b[2]
// load a[3] + load b[3]
// Reordering the second load b[1] load a[1] would allow us to vectorize this
// code.
void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                        SmallVectorImpl<Value *> &Left,
                                        SmallVectorImpl<Value *> &Right) {
  // Push left and right operands of binary operation into Left and Right.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    Left.push_back(cast<Instruction>(VL[i])->getOperand(0));
    Right.push_back(cast<Instruction>(VL[i])->getOperand(1));
  }

  // Reorder if we have a commutative operation and consecutive accesses
  // are on either side of the alternate instructions.
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
  }
}
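
// Illustrative note (added commentary, not part of the original source): for
// the add/sub pattern in the comment above, the initial split puts b[1] into
// Left and a[1] into Right at index 1; swapping that pair makes Left the
// consecutive a[0..3] loads and Right the consecutive b[0..3] loads, so both
// operand vectors become vectorizable wide loads.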

// Return true if I should be commuted before adding its left and right
// operands to the arrays Left and Right.
//
// The vectorizer is trying either to have all elements on one side be
// instructions with the same opcode, to enable further vectorization, or to
// have a splat, to lower the vectorization cost.
static bool shouldReorderOperands(int i, Instruction &I,
                                  SmallVectorImpl<Value *> &Left,
                                  SmallVectorImpl<Value *> &Right,
                                  bool AllSameOpcodeLeft,
                                  bool AllSameOpcodeRight, bool SplatLeft,
                                  bool SplatRight) {
  Value *VLeft = I.getOperand(0);
  Value *VRight = I.getOperand(1);
  // If we have "SplatRight", try to see if commuting is needed to preserve it.
  if (SplatRight) {
    if (VRight == Right[i - 1])
      // Preserve SplatRight
      return false;
    if (VLeft == Right[i - 1]) {
      // Commuting would preserve SplatRight, but we don't want to break
      // SplatLeft either, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (SplatLeft && VLeft == Left[i - 1])
        return false;
      return true;
    }
  }
  // Symmetrically handle Right side.
  if (SplatLeft) {
    if (VLeft == Left[i - 1])
      // Preserve SplatLeft
      return false;
    if (VRight == Left[i - 1])
      return true;
  }

  Instruction *ILeft = dyn_cast<Instruction>(VLeft);
  Instruction *IRight = dyn_cast<Instruction>(VRight);

  // If we have "AllSameOpcodeRight", try to see if the left operand preserves
  // it and not the right; in this case we want to commute.
  if (AllSameOpcodeRight) {
    unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
    if (IRight && RightPrevOpcode == IRight->getOpcode())
      // Do not commute, a match on the right preserves AllSameOpcodeRight
      return false;
    if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
      // We have a match and may want to commute, but first check if there is
      // not also a match on the existing operands on the Left to preserve
      // AllSameOpcodeLeft, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (AllSameOpcodeLeft && ILeft &&
          cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
        return false;
      return true;
    }
  }
  // Symmetrically handle Left side.
  if (AllSameOpcodeLeft) {
    unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
    if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
      return false;
    if (IRight && LeftPrevOpcode == IRight->getOpcode())
      return true;
  }
  return false;
}
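
// Illustrative note (added commentary, not part of the original source):
// given the bundle {add %x, %s ; add %y, %s ; add %s, %z}, the first two
// elements make SplatRight true for %s. For the third element VLeft == %s ==
// Right[i - 1] while SplatLeft does not hold, so the helper returns true and
// the operands are commuted, keeping %s on the right as a broadcast.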
2081 | ||||
2082 | void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, | |||
2083 | SmallVectorImpl<Value *> &Left, | |||
2084 | SmallVectorImpl<Value *> &Right) { | |||
2085 | ||||
2086 | if (VL.size()) { | |||
2087 | // Peel the first iteration out of the loop since there's nothing | |||
2088 | // interesting to do anyway and it simplifies the checks in the loop. | |||
2089 | auto VLeft = cast<Instruction>(VL[0])->getOperand(0); | |||
2090 | auto VRight = cast<Instruction>(VL[0])->getOperand(1); | |||
2091 | if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft)) | |||
2092 | // Favor having instruction to the right. FIXME: why? | |||
2093 | std::swap(VLeft, VRight); | |||
2094 | Left.push_back(VLeft); | |||
2095 | Right.push_back(VRight); | |||
2096 | } | |||
2097 | ||||
2098 | // Keep track if we have instructions with all the same opcode on one side. | |||
2099 | bool AllSameOpcodeLeft = isa<Instruction>(Left[0]); | |||
2100 | bool AllSameOpcodeRight = isa<Instruction>(Right[0]); | |||
2101 | // Keep track if we have one side with all the same value (broadcast). | |||
2102 | bool SplatLeft = true; | |||
2103 | bool SplatRight = true; | |||
2104 | ||||
2105 | for (unsigned i = 1, e = VL.size(); i != e; ++i) { | |||
2106 | Instruction *I = cast<Instruction>(VL[i]); | |||
2107 | assert(I->isCommutative() && "Can only process commutative instruction")((I->isCommutative() && "Can only process commutative instruction" ) ? static_cast<void> (0) : __assert_fail ("I->isCommutative() && \"Can only process commutative instruction\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Vectorize/SLPVectorizer.cpp" , 2107, __PRETTY_FUNCTION__)); | |||
2108 | // Commute to favor either a splat or maximizing having the same opcodes on | |||
2109 | // one side. | |||
2110 | if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft, | |||
2111 | AllSameOpcodeRight, SplatLeft, SplatRight)) { | |||
2112 | Left.push_back(I->getOperand(1)); | |||
2113 | Right.push_back(I->getOperand(0)); | |||
2114 | } else { | |||
2115 | Left.push_back(I->getOperand(0)); | |||
2116 | Right.push_back(I->getOperand(1)); | |||
2117 | } | |||
2118 | // Update Splat* and AllSameOpcode* after the insertion. | |||
2119 | SplatRight = SplatRight && (Right[i - 1] == Right[i]); | |||
2120 | SplatLeft = SplatLeft && (Left[i - 1] == Left[i]); | |||
2121 | AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) && | |||
2122 | (cast<Instruction>(Left[i - 1])->getOpcode() == | |||
2123 | cast<Instruction>(Left[i])->getOpcode()); | |||
2124 | AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) && | |||
2125 | (cast<Instruction>(Right[i - 1])->getOpcode() == | |||
2126 | cast<Instruction>(Right[i])->getOpcode()); | |||
2127 | } | |||
2128 | ||||
2129 | // If one operand end up being broadcast, return this operand order. | |||
2130 | if (SplatRight || SplatLeft) | |||
2131 | return; | |||
2132 | ||||
2133 | // Finally check if we can get longer vectorizable chain by reordering | |||
2134 | // without breaking the good operand order detected above. | |||
2135 | // E.g. If we have something like- | |||
2136 | // load a[0] load b[0] | |||
2137 | // load b[1] load a[1] | |||
2138 | // load a[2] load b[2] | |||
2139 | // load a[3] load b[3] | |||
2140 | // Reordering the second load b[1] load a[1] would allow us to vectorize | |||
2141 | // this code and we still retain AllSameOpcode property. | |||
2142 | // FIXME: This load reordering might break AllSameOpcode in some rare cases | |||
2143 | // such as- | |||
2144 | // add a[0],c[0] load b[0] | |||
2145 | // add a[1],c[2] load b[1] | |||
2146 | // b[2] load b[2] | |||
2147 | // add a[3],c[3] load b[3] | |||
2148 | for (unsigned j = 0; j < VL.size() - 1; ++j) { | |||
2149 | if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { | |||
2150 | if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { | |||
2151 | if (isConsecutiveAccess(L, L1, *DL, *SE)) { | |||
2152 | std::swap(Left[j + 1], Right[j + 1]); | |||
2153 | continue; | |||
2154 | } | |||
2155 | } | |||
2156 | } | |||
2157 | if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { | |||
2158 | if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { | |||
2159 | if (isConsecutiveAccess(L, L1, *DL, *SE)) { | |||
2160 | std::swap(Left[j + 1], Right[j + 1]); | |||
2161 | continue; | |||
2162 | } | |||
2163 | } | |||
2164 | } | |||
2165 | // else unchanged | |||
2166 | } | |||
2167 | } | |||
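To make the swap above concrete, here is a minimal standalone sketch in plain C++. The `Load` pair and `isConsecutive` predicate are hypothetical stand-ins for the IR load instructions and isConsecutiveAccess(); this is an illustration of the reordering idea, not the pass itself.

    // Minimal sketch of the load-reordering loop above (hypothetical types).
    // Elements are (array, index) pairs; "consecutive" means same array with
    // adjacent indices -- a stand-in for isConsecutiveAccess().
    #include <utility>
    #include <vector>

    using Load = std::pair<char, int>; // e.g. {'a', 0} models "load a[0]"

    static bool isConsecutive(const Load &L0, const Load &L1) {
      return L0.first == L1.first && L0.second + 1 == L1.second;
    }

    void reorderForConsecutiveLoads(std::vector<Load> &Left,
                                    std::vector<Load> &Right) {
      for (size_t j = 0; j + 1 < Left.size(); ++j) {
        // If Left[j] continues into Right[j+1] (or Right[j] into Left[j+1]),
        // swapping lane j+1 lengthens a consecutive chain on one side.
        if (isConsecutive(Left[j], Right[j + 1]) ||
            isConsecutive(Right[j], Left[j + 1]))
          std::swap(Left[j + 1], Right[j + 1]);
      }
    }

With Left = {a[0], b[1], a[2], a[3]} and Right = {b[0], a[1], b[2], b[3]}, the sketch swaps lane 1, leaving all a[i] loads on the left and all b[i] loads on the right, matching the comment's example.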
2168 | ||||
2169 | void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) { | |||
2170 | Instruction *VL0 = cast<Instruction>(VL[0]); | |||
2171 | BasicBlock::iterator NextInst(VL0); | |||
2172 | ++NextInst; | |||
2173 | Builder.SetInsertPoint(VL0->getParent(), NextInst); | |||
2174 | Builder.SetCurrentDebugLocation(VL0->getDebugLoc()); | |||
2175 | } | |||
2176 | ||||
2177 | Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { | |||
2178 | Value *Vec = UndefValue::get(Ty); | |||
2179 | // Generate the 'InsertElement' instructions. | |||
2180 | for (unsigned i = 0; i < Ty->getNumElements(); ++i) { | |||
2181 | Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); | |||
2182 | if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) { | |||
2183 | GatherSeq.insert(Insrt); | |||
2184 | CSEBlocks.insert(Insrt->getParent()); | |||
2185 | ||||
2186 | // Add to our 'need-to-extract' list. | |||
2187 | if (ScalarToTreeEntry.count(VL[i])) { | |||
2188 | int Idx = ScalarToTreeEntry[VL[i]]; | |||
2189 | TreeEntry *E = &VectorizableTree[Idx]; | |||
2190 | // Find which lane we need to extract. | |||
2191 | int FoundLane = -1; | |||
2192 | for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) { | |||
2193 | // Is this the lane of the scalar that we are looking for? | |||
2194 | if (E->Scalars[Lane] == VL[i]) { | |||
2195 | FoundLane = Lane; | |||
2196 | break; | |||
2197 | } | |||
2198 | } | |||
2199 | assert(FoundLane >= 0 && "Could not find the correct lane"); | |||
2200 | ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); | |||
2201 | } | |||
2202 | } | |||
2203 | } | |||
2204 | ||||
2205 | return Vec; | |||
2206 | } | |||
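For reference, the insertelement chain that Gather builds can be sketched as a freestanding helper against the LLVM 3.9-era IRBuilder API. `gatherScalars` is a hypothetical name; the real method additionally records gather instructions for CSE and external uses, as shown above.

    // Sketch of the insertelement chain emitted by Gather() (LLVM 3.9-era API).
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/IRBuilder.h"

    llvm::Value *gatherScalars(llvm::IRBuilder<> &Builder,
                               llvm::ArrayRef<llvm::Value *> VL,
                               llvm::VectorType *Ty) {
      llvm::Value *Vec = llvm::UndefValue::get(Ty);
      // One chained insertelement per lane, starting from undef.
      for (unsigned i = 0, e = Ty->getNumElements(); i != e; ++i)
        Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
      return Vec; // e.g. 4 scalars -> 4 chained insertelement instructions
    }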
2207 | ||||
2208 | Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const { | |||
2209 | SmallDenseMap<Value*, int>::const_iterator Entry | |||
2210 | = ScalarToTreeEntry.find(VL[0]); | |||
2211 | if (Entry != ScalarToTreeEntry.end()) { | |||
2212 | int Idx = Entry->second; | |||
2213 | const TreeEntry *En = &VectorizableTree[Idx]; | |||
2214 | if (En->isSame(VL) && En->VectorizedValue) | |||
2215 | return En->VectorizedValue; | |||
2216 | } | |||
2217 | return nullptr; | |||
2218 | } | |||
2219 | ||||
2220 | Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { | |||
2221 | if (ScalarToTreeEntry.count(VL[0])) { | |||
2222 | int Idx = ScalarToTreeEntry[VL[0]]; | |||
2223 | TreeEntry *E = &VectorizableTree[Idx]; | |||
2224 | if (E->isSame(VL)) | |||
2225 | return vectorizeTree(E); | |||
2226 | } | |||
2227 | ||||
2228 | Type *ScalarTy = VL[0]->getType(); | |||
2229 | if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) | |||
2230 | ScalarTy = SI->getValueOperand()->getType(); | |||
2231 | VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); | |||
2232 | ||||
2233 | return Gather(VL, VecTy); | |||
2234 | } | |||
2235 | ||||
2236 | Value *BoUpSLP::vectorizeTree(TreeEntry *E) { | |||
2237 | IRBuilder<>::InsertPointGuard Guard(Builder); | |||
2238 | ||||
2239 | if (E->VectorizedValue) { | |||
2240 | DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); | |||
2241 | return E->VectorizedValue; | |||
2242 | } | |||
2243 | ||||
2244 | Instruction *VL0 = cast<Instruction>(E->Scalars[0]); | |||
2245 | Type *ScalarTy = VL0->getType(); | |||
2246 | if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) | |||
2247 | ScalarTy = SI->getValueOperand()->getType(); | |||
2248 | VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); | |||
2249 | ||||
2250 | if (E->NeedToGather) { | |||
2251 | setInsertPointAfterBundle(E->Scalars); | |||
2252 | return Gather(E->Scalars, VecTy); | |||
2253 | } | |||
2254 | ||||
2255 | unsigned Opcode = getSameOpcode(E->Scalars); | |||
2256 | ||||
2257 | switch (Opcode) { | |||
2258 | case Instruction::PHI: { | |||
2259 | PHINode *PH = dyn_cast<PHINode>(VL0); | |||
2260 | Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); | |||
2261 | Builder.SetCurrentDebugLocation(PH->getDebugLoc()); | |||
2262 | PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); | |||
2263 | E->VectorizedValue = NewPhi; | |||
2264 | ||||
2265 | // PHINodes may have multiple entries from the same block. We want to | |||
2266 | // visit every block once. | |||
2267 | SmallSet<BasicBlock*, 4> VisitedBBs; | |||
2268 | ||||
2269 | for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { | |||
2270 | ValueList Operands; | |||
2271 | BasicBlock *IBB = PH->getIncomingBlock(i); | |||
2272 | ||||
2273 | if (!VisitedBBs.insert(IBB).second) { | |||
2274 | NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); | |||
2275 | continue; | |||
2276 | } | |||
2277 | ||||
2278 | // Prepare the operand vector. | |||
2279 | for (Value *V : E->Scalars) | |||
2280 | Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB)); | |||
2281 | ||||
2282 | Builder.SetInsertPoint(IBB->getTerminator()); | |||
2283 | Builder.SetCurrentDebugLocation(PH->getDebugLoc()); | |||
2284 | Value *Vec = vectorizeTree(Operands); | |||
2285 | NewPhi->addIncoming(Vec, IBB); | |||
2286 | } | |||
2287 | ||||
2288 | assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && | |||
2289 | "Invalid number of incoming values"); | |||
2290 | return NewPhi; | |||
2291 | } | |||
2292 | ||||
2293 | case Instruction::ExtractElement: { | |||
2294 | if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) { | |||
2295 | Value *V = VL0->getOperand(0); | |||
2296 | E->VectorizedValue = V; | |||
2297 | return V; | |||
2298 | } | |||
2299 | return Gather(E->Scalars, VecTy); | |||
2300 | } | |||
2301 | case Instruction::ExtractValue: { | |||
2302 | if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) { | |||
2303 | LoadInst *LI = cast<LoadInst>(VL0->getOperand(0)); | |||
2304 | Builder.SetInsertPoint(LI); | |||
2305 | PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); | |||
2306 | Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); | |||
2307 | LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment()); | |||
2308 | E->VectorizedValue = V; | |||
2309 | return propagateMetadata(V, E->Scalars); | |||
2310 | } | |||
2311 | return Gather(E->Scalars, VecTy); | |||
2312 | } | |||
2313 | case Instruction::ZExt: | |||
2314 | case Instruction::SExt: | |||
2315 | case Instruction::FPToUI: | |||
2316 | case Instruction::FPToSI: | |||
2317 | case Instruction::FPExt: | |||
2318 | case Instruction::PtrToInt: | |||
2319 | case Instruction::IntToPtr: | |||
2320 | case Instruction::SIToFP: | |||
2321 | case Instruction::UIToFP: | |||
2322 | case Instruction::Trunc: | |||
2323 | case Instruction::FPTrunc: | |||
2324 | case Instruction::BitCast: { | |||
2325 | ValueList INVL; | |||
2326 | for (Value *V : E->Scalars) | |||
2327 | INVL.push_back(cast<Instruction>(V)->getOperand(0)); | |||
2328 | ||||
2329 | setInsertPointAfterBundle(E->Scalars); | |||
2330 | ||||
2331 | Value *InVec = vectorizeTree(INVL); | |||
2332 | ||||
2333 | if (Value *V = alreadyVectorized(E->Scalars)) | |||
2334 | return V; | |||
2335 | ||||
2336 | CastInst *CI = dyn_cast<CastInst>(VL0); | |||
2337 | Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); | |||
2338 | E->VectorizedValue = V; | |||
2339 | ++NumVectorInstructions; | |||
2340 | return V; | |||
2341 | } | |||
2342 | case Instruction::FCmp: | |||
2343 | case Instruction::ICmp: { | |||
2344 | ValueList LHSV, RHSV; | |||
2345 | for (Value *V : E->Scalars) { | |||
2346 | LHSV.push_back(cast<Instruction>(V)->getOperand(0)); | |||
2347 | RHSV.push_back(cast<Instruction>(V)->getOperand(1)); | |||
2348 | } | |||
2349 | ||||
2350 | setInsertPointAfterBundle(E->Scalars); | |||
2351 | ||||
2352 | Value *L = vectorizeTree(LHSV); | |||
2353 | Value *R = vectorizeTree(RHSV); | |||
2354 | ||||
2355 | if (Value *V = alreadyVectorized(E->Scalars)) | |||
2356 | return V; | |||
2357 | ||||
2358 | CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); | |||
2359 | Value *V; | |||
2360 | if (Opcode == Instruction::FCmp) | |||
2361 | V = Builder.CreateFCmp(P0, L, R); | |||
2362 | else | |||
2363 | V = Builder.CreateICmp(P0, L, R); | |||
2364 | ||||
2365 | E->VectorizedValue = V; | |||
2366 | ++NumVectorInstructions; | |||
2367 | return V; | |||
2368 | } | |||
2369 | case Instruction::Select: { | |||
2370 | ValueList TrueVec, FalseVec, CondVec; | |||
2371 | for (Value *V : E->Scalars) { | |||
2372 | CondVec.push_back(cast<Instruction>(V)->getOperand(0)); | |||
2373 | TrueVec.push_back(cast<Instruction>(V)->getOperand(1)); | |||
2374 | FalseVec.push_back(cast<Instruction>(V)->getOperand(2)); | |||
2375 | } | |||
2376 | ||||
2377 | setInsertPointAfterBundle(E->Scalars); | |||
2378 | ||||
2379 | Value *Cond = vectorizeTree(CondVec); | |||
2380 | Value *True = vectorizeTree(TrueVec); | |||
2381 | Value *False = vectorizeTree(FalseVec); | |||
2382 | ||||
2383 | if (Value *V = alreadyVectorized(E->Scalars)) | |||
2384 | return V; | |||
2385 | ||||
2386 | Value *V = Builder.CreateSelect(Cond, True, False); | |||
2387 | E->VectorizedValue = V; | |||
2388 | ++NumVectorInstructions; | |||
2389 | return V; | |||
2390 | } | |||
2391 | case Instruction::Add: | |||
2392 | case Instruction::FAdd: | |||
2393 | case Instruction::Sub: | |||
2394 | case Instruction::FSub: | |||
2395 | case Instruction::Mul: | |||
2396 | case Instruction::FMul: | |||
2397 | case Instruction::UDiv: | |||
2398 | case Instruction::SDiv: | |||
2399 | case Instruction::FDiv: | |||
2400 | case Instruction::URem: | |||
2401 | case Instruction::SRem: | |||
2402 | case Instruction::FRem: | |||
2403 | case Instruction::Shl: | |||
2404 | case Instruction::LShr: | |||
2405 | case Instruction::AShr: | |||
2406 | case Instruction::And: | |||
2407 | case Instruction::Or: | |||
2408 | case Instruction::Xor: { | |||
2409 | ValueList LHSVL, RHSVL; | |||
2410 | if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) | |||
2411 | reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); | |||
2412 | else | |||
2413 | for (Value *V : E->Scalars) { | |||
2414 | LHSVL.push_back(cast<Instruction>(V)->getOperand(0)); | |||
2415 | RHSVL.push_back(cast<Instruction>(V)->getOperand(1)); | |||
2416 | } | |||
2417 | ||||
2418 | setInsertPointAfterBundle(E->Scalars); | |||
2419 | ||||
2420 | Value *LHS = vectorizeTree(LHSVL); | |||
2421 | Value *RHS = vectorizeTree(RHSVL); | |||
2422 | ||||
2423 | if (LHS == RHS && isa<Instruction>(LHS)) { | |||
2424 | assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order"); | |||
2425 | } | |||
2426 | ||||
2427 | if (Value *V = alreadyVectorized(E->Scalars)) | |||
2428 | return V; | |||
2429 | ||||
2430 | BinaryOperator *BinOp = cast<BinaryOperator>(VL0); | |||
2431 | Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); | |||
2432 | E->VectorizedValue = V; | |||
2433 | propagateIRFlags(E->VectorizedValue, E->Scalars); | |||
2434 | ++NumVectorInstructions; | |||
2435 | ||||
2436 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
2437 | return propagateMetadata(I, E->Scalars); | |||
2438 | ||||
2439 | return V; | |||
2440 | } | |||
2441 | case Instruction::Load: { | |||
2442 | // Loads are inserted at the head of the tree because we don't want to | |||
2443 | // sink them all the way down past store instructions. | |||
2444 | setInsertPointAfterBundle(E->Scalars); | |||
2445 | ||||
2446 | LoadInst *LI = cast<LoadInst>(VL0); | |||
2447 | Type *ScalarLoadTy = LI->getType(); | |||
2448 | unsigned AS = LI->getPointerAddressSpace(); | |||
2449 | ||||
2450 | Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), | |||
2451 | VecTy->getPointerTo(AS)); | |||
2452 | ||||
2453 | // The pointer operand uses an in-tree scalar, so we add the new BitCast to | |||
2454 | // the ExternalUses list to make sure that an extract will be generated in the | |||
2455 | // future. | |||
2456 | if (ScalarToTreeEntry.count(LI->getPointerOperand())) | |||
2457 | ExternalUses.push_back( | |||
2458 | ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0)); | |||
2459 | ||||
2460 | unsigned Alignment = LI->getAlignment(); | |||
2461 | LI = Builder.CreateLoad(VecPtr); | |||
2462 | if (!Alignment) { | |||
2463 | Alignment = DL->getABITypeAlignment(ScalarLoadTy); | |||
2464 | } | |||
2465 | LI->setAlignment(Alignment); | |||
2466 | E->VectorizedValue = LI; | |||
2467 | ++NumVectorInstructions; | |||
2468 | return propagateMetadata(LI, E->Scalars); | |||
2469 | } | |||
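The load widening above can be sketched as a freestanding helper (hypothetical `emitVectorLoad`, LLVM 3.9-era API). Note the alignment fallback: an alignment of 0 on the scalar load means "unspecified", so the ABI alignment of the scalar type is used instead.

    // Sketch of the vector-load emission in the Load case above.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"

    llvm::LoadInst *emitVectorLoad(llvm::IRBuilder<> &Builder,
                                   const llvm::DataLayout &DL,
                                   llvm::LoadInst *ScalarLoad,
                                   llvm::VectorType *VecTy) {
      unsigned AS = ScalarLoad->getPointerAddressSpace();
      // Reuse the scalar pointer, retyped as a pointer to the vector type.
      llvm::Value *VecPtr = Builder.CreateBitCast(
          ScalarLoad->getPointerOperand(), VecTy->getPointerTo(AS));
      unsigned Alignment = ScalarLoad->getAlignment();
      if (!Alignment) // 0 means "unspecified": use the scalar's ABI alignment
        Alignment = DL.getABITypeAlignment(ScalarLoad->getType());
      llvm::LoadInst *VecLoad = Builder.CreateLoad(VecPtr);
      VecLoad->setAlignment(Alignment);
      return VecLoad;
    }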
2470 | case Instruction::Store: { | |||
2471 | StoreInst *SI = cast<StoreInst>(VL0); | |||
2472 | unsigned Alignment = SI->getAlignment(); | |||
2473 | unsigned AS = SI->getPointerAddressSpace(); | |||
2474 | ||||
2475 | ValueList ValueOp; | |||
2476 | for (Value *V : E->Scalars) | |||
2477 | ValueOp.push_back(cast<StoreInst>(V)->getValueOperand()); | |||
2478 | ||||
2479 | setInsertPointAfterBundle(E->Scalars); | |||
2480 | ||||
2481 | Value *VecValue = vectorizeTree(ValueOp); | |||
2482 | Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), | |||
2483 | VecTy->getPointerTo(AS)); | |||
2484 | StoreInst *S = Builder.CreateStore(VecValue, VecPtr); | |||
2485 | ||||
2486 | // The pointer operand uses an in-tree scalar, so we add the new BitCast to | |||
2487 | // the ExternalUses list to make sure that an extract will be generated in the | |||
2488 | // future. | |||
2489 | if (ScalarToTreeEntry.count(SI->getPointerOperand())) | |||
2490 | ExternalUses.push_back( | |||
2491 | ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0)); | |||
2492 | ||||
2493 | if (!Alignment) { | |||
2494 | Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); | |||
2495 | } | |||
2496 | S->setAlignment(Alignment); | |||
2497 | E->VectorizedValue = S; | |||
2498 | ++NumVectorInstructions; | |||
2499 | return propagateMetadata(S, E->Scalars); | |||
2500 | } | |||
2501 | case Instruction::GetElementPtr: { | |||
2502 | setInsertPointAfterBundle(E->Scalars); | |||
2503 | ||||
2504 | ValueList Op0VL; | |||
2505 | for (Value *V : E->Scalars) | |||
2506 | Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); | |||
2507 | ||||
2508 | Value *Op0 = vectorizeTree(Op0VL); | |||
2509 | ||||
2510 | std::vector<Value *> OpVecs; | |||
2511 | for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; | |||
2512 | ++j) { | |||
2513 | ValueList OpVL; | |||
2514 | for (Value *V : E->Scalars) | |||
2515 | OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); | |||
2516 | ||||
2517 | Value *OpVec = vectorizeTree(OpVL); | |||
2518 | OpVecs.push_back(OpVec); | |||
2519 | } | |||
2520 | ||||
2521 | Value *V = Builder.CreateGEP( | |||
2522 | cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); | |||
2523 | E->VectorizedValue = V; | |||
2524 | ++NumVectorInstructions; | |||
2525 | ||||
2526 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
2527 | return propagateMetadata(I, E->Scalars); | |||
2528 | ||||
2529 | return V; | |||
2530 | } | |||
2531 | case Instruction::Call: { | |||
2532 | CallInst *CI = cast<CallInst>(VL0); | |||
2533 | setInsertPointAfterBundle(E->Scalars); | |||
2534 | Function *FI; | |||
2535 | Intrinsic::ID IID = Intrinsic::not_intrinsic; | |||
2536 | Value *ScalarArg = nullptr; | |||
2537 | if (CI && (FI = CI->getCalledFunction())) { | |||
2538 | IID = FI->getIntrinsicID(); | |||
2539 | } | |||
2540 | std::vector<Value *> OpVecs; | |||
2541 | for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { | |||
2542 | ValueList OpVL; | |||
2543 | // ctlz, cttz, and powi are special intrinsics whose second argument is | |||
2544 | // a scalar. This argument should not be vectorized. | |||
2545 | if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { | |||
2546 | CallInst *CEI = cast<CallInst>(E->Scalars[0]); | |||
2547 | ScalarArg = CEI->getArgOperand(j); | |||
2548 | OpVecs.push_back(CEI->getArgOperand(j)); | |||
2549 | continue; | |||
2550 | } | |||
2551 | for (Value *V : E->Scalars) { | |||
2552 | CallInst *CEI = cast<CallInst>(V); | |||
2553 | OpVL.push_back(CEI->getArgOperand(j)); | |||
2554 | } | |||
2555 | ||||
2556 | Value *OpVec = vectorizeTree(OpVL); | |||
2557 | DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); | |||
2558 | OpVecs.push_back(OpVec); | |||
2559 | } | |||
2560 | ||||
2561 | Module *M = F->getParent(); | |||
2562 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
2563 | Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; | |||
2564 | Function *CF = Intrinsic::getDeclaration(M, ID, Tys); | |||
2565 | SmallVector<OperandBundleDef, 1> OpBundles; | |||
2566 | CI->getOperandBundlesAsDefs(OpBundles); | |||
2567 | Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); | |||
2568 | ||||
2569 | // The scalar argument uses an in-tree scalar, so we add the new vectorized | |||
2570 | // call to the ExternalUses list to make sure that an extract will be | |||
2571 | // generated in the future. | |||
2572 | if (ScalarArg && ScalarToTreeEntry.count(ScalarArg)) | |||
2573 | ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); | |||
2574 | ||||
2575 | E->VectorizedValue = V; | |||
2576 | ++NumVectorInstructions; | |||
2577 | return V; | |||
2578 | } | |||
2579 | case Instruction::ShuffleVector: { | |||
2580 | ValueList LHSVL, RHSVL; | |||
2581 | assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand"); | |||
2582 | reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL); | |||
2583 | setInsertPointAfterBundle(E->Scalars); | |||
2584 | ||||
2585 | Value *LHS = vectorizeTree(LHSVL); | |||
2586 | Value *RHS = vectorizeTree(RHSVL); | |||
2587 | ||||
2588 | if (Value *V = alreadyVectorized(E->Scalars)) | |||
2589 | return V; | |||
2590 | ||||
2591 | // Create a vector of LHS op1 RHS | |||
2592 | BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0); | |||
2593 | Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS); | |||
2594 | ||||
2595 | // Create a vector of LHS op2 RHS | |||
2596 | Instruction *VL1 = cast<Instruction>(E->Scalars[1]); | |||
2597 | BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1); | |||
2598 | Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS); | |||
2599 | ||||
2600 | // Create shuffle to take alternate operations from the vector. | |||
2601 | // Also, gather up odd and even scalar ops to propagate IR flags to | |||
2602 | // each vector operation. | |||
2603 | ValueList OddScalars, EvenScalars; | |||
2604 | unsigned e = E->Scalars.size(); | |||
2605 | SmallVector<Constant *, 8> Mask(e); | |||
2606 | for (unsigned i = 0; i < e; ++i) { | |||
2607 | if (i & 1) { | |||
2608 | Mask[i] = Builder.getInt32(e + i); | |||
2609 | OddScalars.push_back(E->Scalars[i]); | |||
2610 | } else { | |||
2611 | Mask[i] = Builder.getInt32(i); | |||
2612 | EvenScalars.push_back(E->Scalars[i]); | |||
2613 | } | |||
2614 | } | |||
2615 | ||||
2616 | Value *ShuffleMask = ConstantVector::get(Mask); | |||
2617 | propagateIRFlags(V0, EvenScalars); | |||
2618 | propagateIRFlags(V1, OddScalars); | |||
2619 | ||||
2620 | Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); | |||
2621 | E->VectorizedValue = V; | |||
2622 | ++NumVectorInstructions; | |||
2623 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
2624 | return propagateMetadata(I, E->Scalars); | |||
2625 | ||||
2626 | return V; | |||
2627 | } | |||
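The shuffle mask built above interleaves the two vector operations: even lanes come from V0 (indices 0..e-1) and odd lanes from V1, whose elements live at indices e..2e-1 in the concatenated shuffle input. A minimal plain-C++ sketch of the mask computation:

    // Sketch of the alternating shuffle mask from the ShuffleVector case above.
    #include <cstdio>

    int main() {
      const unsigned e = 4; // bundle width
      unsigned Mask[e];
      for (unsigned i = 0; i < e; ++i)
        Mask[i] = (i & 1) ? e + i : i;
      // Prints "0 5 2 7": lanes 0,2 select from V0, lanes 1,3 from V1.
      for (unsigned m : Mask)
        std::printf("%u ", m);
      return 0;
    }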
2628 | default: | |||
2629 | llvm_unreachable("unknown inst"); | |||
2630 | } | |||
2631 | return nullptr; | |||
2632 | } | |||
2633 | ||||
2634 | Value *BoUpSLP::vectorizeTree() { | |||
2635 | ||||
2636 | // All blocks must be scheduled before any instructions are inserted. | |||
2637 | for (auto &BSIter : BlocksSchedules) { | |||
2638 | scheduleBlock(BSIter.second.get()); | |||
2639 | } | |||
2640 | ||||
2641 | Builder.SetInsertPoint(&F->getEntryBlock().front()); | |||
2642 | auto *VectorRoot = vectorizeTree(&VectorizableTree[0]); | |||
2643 | ||||
2644 | // If the vectorized tree can be rewritten in a smaller type, we truncate the | |||
2645 | // vectorized root. InstCombine will then rewrite the entire expression. We | |||
2646 | // sign extend the extracted values below. | |||
2647 | auto *ScalarRoot = VectorizableTree[0].Scalars[0]; | |||
2648 | if (MinBWs.count(ScalarRoot)) { | |||
2649 | if (auto *I = dyn_cast<Instruction>(VectorRoot)) | |||
2650 | Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); | |||
2651 | auto BundleWidth = VectorizableTree[0].Scalars.size(); | |||
2652 | auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]); | |||
2653 | auto *VecTy = VectorType::get(MinTy, BundleWidth); | |||
2654 | auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); | |||
2655 | VectorizableTree[0].VectorizedValue = Trunc; | |||
2656 | } | |||
2657 | ||||
2658 | DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n"); | |||
2659 | ||||
2660 | // Extract all of the elements with the external uses. | |||
2661 | for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end(); | |||
2662 | it != e; ++it) { | |||
2663 | Value *Scalar = it->Scalar; | |||
2664 | llvm::User *User = it->User; | |||
2665 | ||||
2666 | // Skip users that we have already RAUW'd. This happens when one instruction | |||
2667 | // has multiple uses of the same value. | |||
2668 | if (std::find(Scalar->user_begin(), Scalar->user_end(), User) == | |||
2669 | Scalar->user_end()) | |||
2670 | continue; | |||
2671 | assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar"); | |||
2672 | ||||
2673 | int Idx = ScalarToTreeEntry[Scalar]; | |||
2674 | TreeEntry *E = &VectorizableTree[Idx]; | |||
2675 | assert(!E->NeedToGather && "Extracting from a gather list"); | |||
2676 | ||||
2677 | Value *Vec = E->VectorizedValue; | |||
2678 | assert(Vec && "Can't find vectorizable value"); | |||
2679 | ||||
2680 | Value *Lane = Builder.getInt32(it->Lane); | |||
2681 | // Generate extracts for out-of-tree users. | |||
2682 | // Find the insertion point for the extractelement lane. | |||
2683 | if (auto *VecI = dyn_cast<Instruction>(Vec)) { | |||
2684 | if (PHINode *PH = dyn_cast<PHINode>(User)) { | |||
2685 | for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { | |||
2686 | if (PH->getIncomingValue(i) == Scalar) { | |||
2687 | TerminatorInst *IncomingTerminator = | |||
2688 | PH->getIncomingBlock(i)->getTerminator(); | |||
2689 | if (isa<CatchSwitchInst>(IncomingTerminator)) { | |||
2690 | Builder.SetInsertPoint(VecI->getParent(), | |||
2691 | std::next(VecI->getIterator())); | |||
2692 | } else { | |||
2693 | Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); | |||
2694 | } | |||
2695 | Value *Ex = Builder.CreateExtractElement(Vec, Lane); | |||
2696 | if (MinBWs.count(ScalarRoot)) | |||
2697 | Ex = Builder.CreateSExt(Ex, Scalar->getType()); | |||
2698 | CSEBlocks.insert(PH->getIncomingBlock(i)); | |||
2699 | PH->setOperand(i, Ex); | |||
2700 | } | |||
2701 | } | |||
2702 | } else { | |||
2703 | Builder.SetInsertPoint(cast<Instruction>(User)); | |||
2704 | Value *Ex = Builder.CreateExtractElement(Vec, Lane); | |||
2705 | if (MinBWs.count(ScalarRoot)) | |||
2706 | Ex = Builder.CreateSExt(Ex, Scalar->getType()); | |||
2707 | CSEBlocks.insert(cast<Instruction>(User)->getParent()); | |||
2708 | User->replaceUsesOfWith(Scalar, Ex); | |||
2709 | } | |||
2710 | } else { | |||
2711 | Builder.SetInsertPoint(&F->getEntryBlock().front()); | |||
2712 | Value *Ex = Builder.CreateExtractElement(Vec, Lane); | |||
2713 | if (MinBWs.count(ScalarRoot)) | |||
2714 | Ex = Builder.CreateSExt(Ex, Scalar->getType()); | |||
2715 | CSEBlocks.insert(&F->getEntryBlock()); | |||
2716 | User->replaceUsesOfWith(Scalar, Ex); | |||
2717 | } | |||
2718 | ||||
2719 | DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); | |||
2720 | } | |||
2721 | ||||
2722 | // For each vectorized value: | |||
2723 | for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { | |||
2724 | TreeEntry *Entry = &VectorizableTree[EIdx]; | |||
2725 | ||||
2726 | // For each lane: | |||
2727 | for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { | |||
2728 | Value *Scalar = Entry->Scalars[Lane]; | |||
2729 | // No need to handle users of gathered values. | |||
2730 | if (Entry->NeedToGather) | |||
2731 | continue; | |||
2732 | ||||
2733 | assert(Entry->VectorizedValue && "Can't find vectorizable value"); | |||
2734 | ||||
2735 | Type *Ty = Scalar->getType(); | |||
2736 | if (!Ty->isVoidTy()) { | |||
2737 | #ifndef NDEBUG | |||
2738 | for (User *U : Scalar->users()) { | |||
2739 | DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); | |||
2740 | ||||
2741 | assert((ScalarToTreeEntry.count(U) || | |||
2742 | // It is legal to replace users in the ignorelist by undef. | |||
2743 | (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) != | |||
2744 | UserIgnoreList.end())) && | |||
2745 | "Replacing out-of-tree value with undef"); | |||
2746 | } | |||
2747 | #endif | |||
2748 | Value *Undef = UndefValue::get(Ty); | |||
2749 | Scalar->replaceAllUsesWith(Undef); | |||
2750 | } | |||
2751 | DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); | |||
2752 | eraseInstruction(cast<Instruction>(Scalar)); | |||
2753 | } | |||
2754 | } | |||
2755 | ||||
2756 | Builder.ClearInsertionPoint(); | |||
2757 | ||||
2758 | return VectorizableTree[0].VectorizedValue; | |||
2759 | } | |||
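The extract-plus-sign-extend pattern repeated three times above can be sketched as one helper (hypothetical `extractLane`, LLVM 3.9-era API). The sign extension compensates for the root truncation performed for MinBWs earlier in this function.

    // Sketch of the per-use lane extraction in vectorizeTree() above.
    #include "llvm/IR/IRBuilder.h"

    llvm::Value *extractLane(llvm::IRBuilder<> &Builder, llvm::Value *Vec,
                             unsigned Lane, llvm::Type *ScalarTy,
                             bool WasDemoted) {
      llvm::Value *Ex =
          Builder.CreateExtractElement(Vec, Builder.getInt32(Lane));
      if (WasDemoted) // the vector root was truncated; widen the extract back
        Ex = Builder.CreateSExt(Ex, ScalarTy);
      return Ex;
    }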
2760 | ||||
2761 | void BoUpSLP::optimizeGatherSequence() { | |||
2762 | DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() | |||
2763 | << " gather sequence instructions.\n"); | |||
2764 | // LICM InsertElementInst sequences. | |||
2765 | for (SetVector<Instruction *>::iterator it = GatherSeq.begin(), | |||
2766 | e = GatherSeq.end(); it != e; ++it) { | |||
2767 | InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it); | |||
2768 | ||||
2769 | if (!Insert) | |||
2770 | continue; | |||
2771 | ||||
2772 | // Check if this block is inside a loop. | |||
2773 | Loop *L = LI->getLoopFor(Insert->getParent()); | |||
2774 | if (!L) | |||
2775 | continue; | |||
2776 | ||||
2777 | // Check if it has a preheader. | |||
2778 | BasicBlock *PreHeader = L->getLoopPreheader(); | |||
2779 | if (!PreHeader) | |||
2780 | continue; | |||
2781 | ||||
2782 | // If the vector or the element that we insert into it are | |||
2783 | // instructions that are defined in this basic block then we can't | |||
2784 | // hoist this instruction. | |||
2785 | Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); | |||
2786 | Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); | |||
2787 | if (CurrVec && L->contains(CurrVec)) | |||
2788 | continue; | |||
2789 | if (NewElem && L->contains(NewElem)) | |||
2790 | continue; | |||
2791 | ||||
2792 | // We can hoist this instruction. Move it to the pre-header. | |||
2793 | Insert->moveBefore(PreHeader->getTerminator()); | |||
2794 | } | |||
2795 | ||||
2796 | // Make a list of all reachable blocks in our CSE queue. | |||
2797 | SmallVector<const DomTreeNode *, 8> CSEWorkList; | |||
2798 | CSEWorkList.reserve(CSEBlocks.size()); | |||
2799 | for (BasicBlock *BB : CSEBlocks) | |||
2800 | if (DomTreeNode *N = DT->getNode(BB)) { | |||
2801 | assert(DT->isReachableFromEntry(N)); | |||
2802 | CSEWorkList.push_back(N); | |||
2803 | } | |||
2804 | ||||
2805 | // Sort blocks by domination. This ensures we visit a block after all blocks | |||
2806 | // dominating it are visited. | |||
2807 | std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), | |||
2808 | [this](const DomTreeNode *A, const DomTreeNode *B) { | |||
2809 | return DT->properlyDominates(A, B); | |||
2810 | }); | |||
2811 | ||||
2812 | // Perform O(N^2) search over the gather sequences and merge identical | |||
2813 | // instructions. TODO: We can further optimize this scan if we split the | |||
2814 | // instructions into different buckets based on the insert lane. | |||
2815 | SmallVector<Instruction *, 16> Visited; | |||
2816 | for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { | |||
2817 | assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && | |||
2818 | "Worklist not sorted properly!"); | |||
2819 | BasicBlock *BB = (*I)->getBlock(); | |||
2820 | // For all instructions in blocks containing gather sequences: | |||
2821 | for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { | |||
2822 | Instruction *In = &*it++; | |||
2823 | if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) | |||
2824 | continue; | |||
2825 | ||||
2826 | // Check if we can replace this instruction with any of the | |||
2827 | // visited instructions. | |||
2828 | for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(), | |||
2829 | ve = Visited.end(); | |||
2830 | v != ve; ++v) { | |||
2831 | if (In->isIdenticalTo(*v) && | |||
2832 | DT->dominates((*v)->getParent(), In->getParent())) { | |||
2833 | In->replaceAllUsesWith(*v); | |||
2834 | eraseInstruction(In); | |||
2835 | In = nullptr; | |||
2836 | break; | |||
2837 | } | |||
2838 | } | |||
2839 | if (In) { | |||
2840 | assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end())((std::find(Visited.begin(), Visited.end(), In) == Visited.end ()) ? static_cast<void> (0) : __assert_fail ("std::find(Visited.begin(), Visited.end(), In) == Visited.end()" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Vectorize/SLPVectorizer.cpp" , 2840, __PRETTY_FUNCTION__)); | |||
2841 | Visited.push_back(In); | |||
2842 | } | |||
2843 | } | |||
2844 | } | |||
2845 | CSEBlocks.clear(); | |||
2846 | GatherSeq.clear(); | |||
2847 | } | |||
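The dominance-ordered duplicate scan above can be sketched generically in plain C++. Strings stand in for instructions, and string equality stands in for isIdenticalTo() plus the dominance check; the point is that an item is either replaced by an earlier identical one or becomes a candidate for replacing later items.

    // Generic sketch of the O(N^2) CSE scan in optimizeGatherSequence().
    #include <string>
    #include <vector>

    int main() {
      // Already "dominance"-sorted list of instruction descriptions.
      std::vector<std::string> Insts = {"insertelt A", "extractelt B",
                                        "insertelt A", "extractelt B"};
      std::vector<std::string> Visited;
      unsigned Erased = 0;
      for (auto &In : Insts) {
        bool Replaced = false;
        for (auto &V : Visited)
          if (In == V) { // stand-in for isIdenticalTo() + dominance check
            ++Erased;    // would RAUW In with V and erase In
            Replaced = true;
            break;
          }
        if (!Replaced)
          Visited.push_back(In);
      }
      return Erased == 2 ? 0 : 1; // the two duplicates get merged
    }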
2848 | ||||
2849 | // Groups the instructions into a bundle (which is then a single scheduling | |||
2850 | // entity) and schedules instructions until the bundle becomes ready. | |||
2851 | bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, | |||
2852 | BoUpSLP *SLP) { | |||
2853 | if (isa<PHINode>(VL[0])) | |||
2854 | return true; | |||
2855 | ||||
2856 | // Initialize the instruction bundle. | |||
2857 | Instruction *OldScheduleEnd = ScheduleEnd; | |||
2858 | ScheduleData *PrevInBundle = nullptr; | |||
2859 | ScheduleData *Bundle = nullptr; | |||
2860 | bool ReSchedule = false; | |||
2861 | DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n"); | |||
2862 | ||||
2863 | // Make sure that the scheduling region contains all | |||
2864 | // instructions of the bundle. | |||
2865 | for (Value *V : VL) { | |||
2866 | if (!extendSchedulingRegion(V)) | |||
2867 | return false; | |||
2868 | } | |||
2869 | ||||
2870 | for (Value *V : VL) { | |||
2871 | ScheduleData *BundleMember = getScheduleData(V); | |||
2872 | assert(BundleMember && | |||
2873 | "no ScheduleData for bundle member (maybe not in same basic block)"); | |||
2874 | if (BundleMember->IsScheduled) { | |||
2875 | // A bundle member was scheduled as a single instruction before and now | |||
2876 | // needs to be scheduled as part of the bundle. We just get rid of the | |||
2877 | // existing schedule. | |||
2878 | DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember | |||
2879 | << " was already scheduled\n"); | |||
2880 | ReSchedule = true; | |||
2881 | } | |||
2882 | assert(BundleMember->isSchedulingEntity() && | |||
2883 | "bundle member already part of other bundle"); | |||
2884 | if (PrevInBundle) { | |||
2885 | PrevInBundle->NextInBundle = BundleMember; | |||
2886 | } else { | |||
2887 | Bundle = BundleMember; | |||
2888 | } | |||
2889 | BundleMember->UnscheduledDepsInBundle = 0; | |||
2890 | Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; | |||
2891 | ||||
2892 | // Group the instructions into a bundle. | |||
2893 | BundleMember->FirstInBundle = Bundle; | |||
2894 | PrevInBundle = BundleMember; | |||
2895 | } | |||
2896 | if (ScheduleEnd != OldScheduleEnd) { | |||
2897 | // The scheduling region got new instructions at the lower end (or it is a | |||
2898 | // new region for the first bundle). This makes it necessary to | |||
2899 | // recalculate all dependencies. | |||
2900 | // This seldom needs to be done a second time after adding the | |||
2901 | // initial bundle to the region. | |||
2902 | for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { | |||
2903 | ScheduleData *SD = getScheduleData(I); | |||
2904 | SD->clearDependencies(); | |||
2905 | } | |||
2906 | ReSchedule = true; | |||
2907 | } | |||
2908 | if (ReSchedule) { | |||
2909 | resetSchedule(); | |||
2910 | initialFillReadyList(ReadyInsts); | |||
2911 | } | |||
2912 | ||||
2913 | DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " | |||
2914 | << BB->getName() << "\n"); | |||
2915 | ||||
2916 | calculateDependencies(Bundle, true, SLP); | |||
2917 | ||||
2918 | // Now try to schedule the new bundle. As soon as the bundle is "ready" it | |||
2919 | // means that there are no cyclic dependencies and we can schedule it. | |||
2920 | // Note that it's important that we don't "schedule" the bundle yet (see | |||
2921 | // cancelScheduling). | |||
2922 | while (!Bundle->isReady() && !ReadyInsts.empty()) { | |||
2923 | ||||
2924 | ScheduleData *pickedSD = ReadyInsts.back(); | |||
2925 | ReadyInsts.pop_back(); | |||
2926 | ||||
2927 | if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { | |||
2928 | schedule(pickedSD, ReadyInsts); | |||
2929 | } | |||
2930 | } | |||
2931 | if (!Bundle->isReady()) { | |||
2932 | cancelScheduling(VL); | |||
2933 | return false; | |||
2934 | } | |||
2935 | return true; | |||
2936 | } | |||
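The ready-list loop at the end of tryScheduleBundle can be sketched in plain C++: a hypothetical `Node` carries a dependency counter, scheduling a node decrements its users' counters, and a counter reaching zero makes a node ready. If the list drains before the bundle is ready, there is a cyclic dependency and the bundle must be cancelled.

    // Sketch of the "schedule until the bundle is ready" loop above.
    #include <vector>

    struct Node {
      int UnscheduledDeps = 0;
      std::vector<Node *> Users; // scheduling this node unblocks its users
      bool isReady() const { return UnscheduledDeps == 0; }
    };

    bool scheduleUntilReady(Node &Bundle, std::vector<Node *> &ReadyInsts) {
      while (!Bundle.isReady() && !ReadyInsts.empty()) {
        Node *Picked = ReadyInsts.back();
        ReadyInsts.pop_back();
        for (Node *U : Picked->Users)
          if (--U->UnscheduledDeps == 0 && U != &Bundle)
            ReadyInsts.push_back(U); // newly ready, schedule next
      }
      return Bundle.isReady(); // false means "cancel the bundle"
    }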
2937 | ||||
2938 | void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) { | |||
2939 | if (isa<PHINode>(VL[0])) | |||
2940 | return; | |||
2941 | ||||
2942 | ScheduleData *Bundle = getScheduleData(VL[0]); | |||
2943 | DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); | |||
2944 | assert(!Bundle->IsScheduled && | |||
2945 | "Can't cancel bundle which is already scheduled"); | |||
2946 | assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && | |||
2947 | "tried to unbundle something which is not a bundle"); | |||
2948 | ||||
2949 | // Un-bundle: make single instructions out of the bundle. | |||
2950 | ScheduleData *BundleMember = Bundle; | |||
2951 | while (BundleMember) { | |||
2952 | assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); | |||
2953 | BundleMember->FirstInBundle = BundleMember; | |||
2954 | ScheduleData *Next = BundleMember->NextInBundle; | |||
2955 | BundleMember->NextInBundle = nullptr; | |||
2956 | BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; | |||
2957 | if (BundleMember->UnscheduledDepsInBundle == 0) { | |||
2958 | ReadyInsts.insert(BundleMember); | |||
2959 | } | |||
2960 | BundleMember = Next; | |||
2961 | } | |||
2962 | } | |||
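A plain-C++ sketch of the un-bundling walk, with a hypothetical `SD` struct mirroring the two bundle links: each member is detached from the chain and becomes the head of its own one-element bundle.

    // Sketch of cancelScheduling()'s un-bundling loop above.
    struct SD {
      SD *FirstInBundle = nullptr;
      SD *NextInBundle = nullptr;
    };

    void unbundle(SD *Bundle) {
      for (SD *Member = Bundle; Member;) {
        SD *Next = Member->NextInBundle;
        Member->FirstInBundle = Member; // each member now leads its own bundle
        Member->NextInBundle = nullptr;
        Member = Next;
      }
    }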
2963 | ||||
2964 | bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) { | |||
2965 | if (getScheduleData(V)) | |||
2966 | return true; | |||
2967 | Instruction *I = dyn_cast<Instruction>(V); | |||
2968 | assert(I && "bundle member must be an instruction"); | |||
2969 | assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); | |||
2970 | if (!ScheduleStart) { | |||
2971 | // It's the first instruction in the new region. | |||
2972 | initScheduleData(I, I->getNextNode(), nullptr, nullptr); | |||
2973 | ScheduleStart = I; | |||
2974 | ScheduleEnd = I->getNextNode(); | |||
2975 | assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); | |||
2976 | DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); | |||
2977 | return true; | |||
2978 | } | |||
2979 | // Search up and down at the same time, because we don't know if the new | |||
2980 | // instruction is above or below the existing scheduling region. | |||
2981 | BasicBlock::reverse_iterator UpIter(ScheduleStart->getIterator()); | |||
2982 | BasicBlock::reverse_iterator UpperEnd = BB->rend(); | |||
2983 | BasicBlock::iterator DownIter(ScheduleEnd); | |||
2984 | BasicBlock::iterator LowerEnd = BB->end(); | |||
2985 | for (;;) { | |||
2986 | if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { | |||
2987 | DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); | |||
2988 | return false; | |||
2989 | } | |||
2990 | ||||
2991 | if (UpIter != UpperEnd) { | |||
2992 | if (&*UpIter == I) { | |||
2993 | initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); | |||
2994 | ScheduleStart = I; | |||
2995 | DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); | |||
2996 | return true; | |||
2997 | } | |||
2998 | UpIter++; | |||
2999 | } | |||
3000 | if (DownIter != LowerEnd) { | |||
3001 | if (&*DownIter == I) { | |||
3002 | initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, | |||
3003 | nullptr); | |||
3004 | ScheduleEnd = I->getNextNode(); | |||
3005 | assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); | |||
3006 | DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); | |||
3007 | return true; | |||
3008 | } | |||
3009 | DownIter++; | |||
3010 | } | |||
3011 | assert((UpIter != UpperEnd || DownIter != LowerEnd) && | |||
3012 | "instruction not found in block"); | |||
3013 | } | |||
3014 | return true; | |||
3015 | } | |||
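The simultaneous up/down scan bounds the search cost by the distance from the existing region rather than by the block size. A plain-C++ sketch over a vector of "instructions" (hypothetical `findAroundRegion`, with the size budget standing in for ScheduleRegionSizeLimit):

    // Sketch of extendSchedulingRegion()'s bidirectional search above.
    #include <vector>

    // Returns the index of Target, or -1 if the budget is exceeded.
    int findAroundRegion(const std::vector<int> &Block, int RegionLo,
                         int RegionHi, int Target, int Budget) {
      int Up = RegionLo - 1, Down = RegionHi;
      for (int Steps = 0; Steps < Budget; ++Steps) {
        if (Up >= 0) {
          if (Block[Up] == Target) return Up;     // extend region upwards
          --Up;
        }
        if (Down < (int)Block.size()) {
          if (Block[Down] == Target) return Down; // extend region downwards
          ++Down;
        }
      }
      return -1; // exceeded the schedule-region size limit
    }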
3016 | ||||
3017 | void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, | |||
3018 | Instruction *ToI, | |||
3019 | ScheduleData *PrevLoadStore, | |||
3020 | ScheduleData *NextLoadStore) { | |||
3021 | ScheduleData *CurrentLoadStore = PrevLoadStore; | |||
3022 | for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { | |||
3023 | ScheduleData *SD = ScheduleDataMap[I]; | |||
3024 | if (!SD) { | |||
3025 | // Allocate a new ScheduleData for the instruction. | |||
3026 | if (ChunkPos >= ChunkSize) { | |||
3027 | ScheduleDataChunks.push_back( | |||
3028 | llvm::make_unique<ScheduleData[]>(ChunkSize)); | |||
3029 | ChunkPos = 0; | |||
3030 | } | |||
3031 | SD = &(ScheduleDataChunks.back()[ChunkPos++]); | |||
3032 | ScheduleDataMap[I] = SD; | |||
3033 | SD->Inst = I; | |||
3034 | } | |||
3035 | assert(!isInSchedulingRegion(SD) && | |||
3036 | "new ScheduleData already in scheduling region"); | |||
3037 | SD->init(SchedulingRegionID); | |||
3038 | ||||
3039 | if (I->mayReadOrWriteMemory()) { | |||
3040 | // Update the linked list of memory accessing instructions. | |||
3041 | if (CurrentLoadStore) { | |||
3042 | CurrentLoadStore->NextLoadStore = SD; | |||
3043 | } else { | |||
3044 | FirstLoadStoreInRegion = SD; | |||
3045 | } | |||
3046 | CurrentLoadStore = SD; | |||
3047 | } | |||
3048 | } | |||
3049 | if (NextLoadStore) { | |||
3050 | if (CurrentLoadStore) | |||
3051 | CurrentLoadStore->NextLoadStore = NextLoadStore; | |||
3052 | } else { | |||
3053 | LastLoadStoreInRegion = CurrentLoadStore; | |||
3054 | } | |||
3055 | } | |||
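The chunked allocation above keeps ScheduleData addresses stable while the pool grows, which matters because ScheduleDataMap and the bundle links hold raw pointers into it. A plain-C++ sketch of the same idea (hypothetical `ChunkPool`; a single std::vector would not work, since reallocation moves its elements):

    // Sketch of the ScheduleDataChunks allocation scheme above.
    #include <memory>
    #include <vector>

    template <typename T, size_t ChunkSize = 256> class ChunkPool {
      std::vector<std::unique_ptr<T[]>> Chunks;
      size_t ChunkPos = ChunkSize; // force a new chunk on first allocation

    public:
      T *allocate() {
        if (ChunkPos >= ChunkSize) {
          Chunks.push_back(std::unique_ptr<T[]>(new T[ChunkSize]()));
          ChunkPos = 0;
        }
        return &Chunks.back()[ChunkPos++]; // stable address for T's lifetime
      }
    };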
3056 | ||||
3057 | void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, | |||
3058 | bool InsertInReadyList, | |||
3059 | BoUpSLP *SLP) { | |||
3060 | assert(SD->isSchedulingEntity()); | |||
3061 | ||||
3062 | SmallVector<ScheduleData *, 10> WorkList; | |||
3063 | WorkList.push_back(SD); | |||
3064 | ||||
3065 | while (!WorkList.empty()) { | |||
3066 | ScheduleData *SD = WorkList.back(); | |||
3067 | WorkList.pop_back(); | |||
3068 | ||||
3069 | ScheduleData *BundleMember = SD; | |||
3070 | while (BundleMember) { | |||
3071 | assert(isInSchedulingRegion(BundleMember)); | |||
3072 | if (!BundleMember->hasValidDependencies()) { | |||
3073 | ||||
3074 | DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); | |||
3075 | BundleMember->Dependencies = 0; | |||
3076 | BundleMember->resetUnscheduledDeps(); | |||
3077 | ||||
3078 | // Handle def-use chain dependencies. | |||
3079 | for (User *U : BundleMember->Inst->users()) { | |||
3080 | if (isa<Instruction>(U)) { | |||
3081 | ScheduleData *UseSD = getScheduleData(U); | |||
3082 | if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { | |||
3083 | BundleMember->Dependencies++; | |||
3084 | ScheduleData *DestBundle = UseSD->FirstInBundle; | |||
3085 | if (!DestBundle->IsScheduled) { | |||
3086 | BundleMember->incrementUnscheduledDeps(1); | |||
3087 | } | |||
3088 | if (!DestBundle->hasValidDependencies()) { | |||
3089 | WorkList.push_back(DestBundle); | |||
3090 | } | |||
3091 | } | |||
3092 | } else { | |||
3093 | // I'm not sure if this can ever happen, but we need to be safe. | |||
3094 | // This prevents the instruction/bundle from ever being scheduled, | |||
3095 | // which eventually disables vectorization. | |||
3096 | BundleMember->Dependencies++; | |||
3097 | BundleMember->incrementUnscheduledDeps(1); | |||
3098 | } | |||
3099 | } | |||
3100 | ||||
3101 | // Handle the memory dependencies. | |||
3102 | ScheduleData *DepDest = BundleMember->NextLoadStore; | |||
3103 | if (DepDest) { | |||
3104 | Instruction *SrcInst = BundleMember->Inst; | |||
3105 | MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); | |||
3106 | bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); | |||
3107 | unsigned numAliased = 0; | |||
3108 | unsigned DistToSrc = 1; | |||
3109 | ||||
3110 | while (DepDest) { | |||
3111 | assert(isInSchedulingRegion(DepDest)); | |||
3112 | ||||
3113 | // We have two limits to reduce the complexity: | |||
3114 | // 1) AliasedCheckLimit: It's a small limit to reduce calls to | |||
3115 | // SLP->isAliased (which is the expensive part in this loop). | |||
3116 | // 2) MaxMemDepDistance: It's for very large blocks and it aborts | |||
3117 | // the whole loop (even if the loop is fast, it's quadratic). | |||
3118 | // It's important for the loop break condition (see below) to | |||
3119 | // check this limit even between two read-only instructions. | |||
3120 | if (DistToSrc >= MaxMemDepDistance || | |||
3121 | ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && | |||
3122 | (numAliased >= AliasedCheckLimit || | |||
3123 | SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { | |||
3124 | ||||
3125 | // We increment the counter only if the locations are aliased | |||
3126 | // (instead of counting all alias checks). This gives a better | |||
3127 | // balance between reduced runtime and accurate dependencies. | |||
3128 | numAliased++; | |||
3129 | ||||
3130 | DepDest->MemoryDependencies.push_back(BundleMember); | |||
3131 | BundleMember->Dependencies++; | |||
3132 | ScheduleData *DestBundle = DepDest->FirstInBundle; | |||
3133 | if (!DestBundle->IsScheduled) { | |||
3134 | BundleMember->incrementUnscheduledDeps(1); | |||
3135 | } | |||
3136 | if (!DestBundle->hasValidDependencies()) { | |||
3137 | WorkList.push_back(DestBundle); | |||
3138 | } | |||
3139 | } | |||
3140 | DepDest = DepDest->NextLoadStore; | |||
3141 | ||||
3142 | // Example, explaining the loop break condition: Let's assume our | |||
3143 | // starting instruction is i0 and MaxMemDepDistance = 3. | |||
3144 | // | |||
3145 | // +--------v--v--v | |||
3146 | // i0,i1,i2,i3,i4,i5,i6,i7,i8 | |||
3147 | // +--------^--^--^ | |||
3148 | // | |||
3149 | // MaxMemDepDistance lets us stop alias-checking at i3 and we add | |||
3150 | // dependencies from i0 to i3,i4,.. (even if they are not aliased). | |||
3151 | // Previously we already added dependencies from i3 to i6,i7,i8 | |||
3152 | // (because of MaxMemDepDistance). As we added a dependency from | |||
3153 | // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 | |||
3154 | // and we can abort this loop at i6. | |||
3155 | if (DistToSrc >= 2 * MaxMemDepDistance) | |||
3156 | break; | |||
3157 | DistToSrc++; | |||
3158 | } | |||
3159 | } | |||
3160 | } | |||
3161 | BundleMember = BundleMember->NextInBundle; | |||
3162 | } | |||
3163 | if (InsertInReadyList && SD->isReady()) { | |||
3164 | ReadyInsts.push_back(SD); | |||
3165 | DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"; } } while (0); | |||
3166 | } | |||
3167 | } | |||
3168 | } | |||
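// Worked example (illustrative only, assuming a scheduling region with the
// three instructions below):
//   %a = load i32, i32* %p
//   %b = add i32 %a, 1
//   store i32 %b, i32* %q
// The load gains one def-use dependency (its user %b) and %b gains one (the
// store); in addition, a memory dependency between the load and the store is
// recorded unless the alias query proves %p and %q independent.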
3169 | ||||
3170 | void BoUpSLP::BlockScheduling::resetSchedule() { | |||
3171 | assert(ScheduleStart && | |||
3172 | "tried to reset schedule on block which has not been scheduled"); | |||
3173 | for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { | |||
3174 | ScheduleData *SD = getScheduleData(I); | |||
3175 | assert(isInSchedulingRegion(SD)); | |||
3176 | SD->IsScheduled = false; | |||
3177 | SD->resetUnscheduledDeps(); | |||
3178 | } | |||
3179 | ReadyInsts.clear(); | |||
3180 | } | |||
3181 | ||||
3182 | void BoUpSLP::scheduleBlock(BlockScheduling *BS) { | |||
3183 | ||||
3184 | if (!BS->ScheduleStart) | |||
3185 | return; | |||
3186 | ||||
3187 | DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: schedule block " << BS ->BB->getName() << "\n"; } } while (0); | |||
3188 | ||||
3189 | BS->resetSchedule(); | |||
3190 | ||||
3191 | // For the real scheduling we use a more sophisticated ready-list: it is | |||
3192 | // sorted by the original instruction location. This lets the final schedule | |||
3193 | // be as close as possible to the original instruction order. | |||
3194 | struct ScheduleDataCompare { | |||
3195 | bool operator()(ScheduleData *SD1, ScheduleData *SD2) { | |||
3196 | return SD2->SchedulingPriority < SD1->SchedulingPriority; | |||
3197 | } | |||
3198 | }; | |||
3199 | std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; | |||
3200 | ||||
3201 | // Ensure that all dependency data is updated and fill the ready-list with | |||
3202 | // initial instructions. | |||
3203 | int Idx = 0; | |||
3204 | int NumToSchedule = 0; | |||
3205 | for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; | |||
3206 | I = I->getNextNode()) { | |||
3207 | ScheduleData *SD = BS->getScheduleData(I); | |||
3208 | assert(SD->isPartOfBundle() == | |||
3209 | (ScalarToTreeEntry.count(SD->Inst) != 0) && | |||
3210 | "scheduler and vectorizer have different opinion on what is a bundle"); | |||
3211 | SD->FirstInBundle->SchedulingPriority = Idx++; | |||
3212 | if (SD->isSchedulingEntity()) { | |||
3213 | BS->calculateDependencies(SD, false, this); | |||
3214 | NumToSchedule++; | |||
3215 | } | |||
3216 | } | |||
3217 | BS->initialFillReadyList(ReadyInsts); | |||
3218 | ||||
3219 | Instruction *LastScheduledInst = BS->ScheduleEnd; | |||
3220 | ||||
3221 | // Do the "real" scheduling. | |||
3222 | while (!ReadyInsts.empty()) { | |||
3223 | ScheduleData *picked = *ReadyInsts.begin(); | |||
3224 | ReadyInsts.erase(ReadyInsts.begin()); | |||
3225 | ||||
3226 | // Move the scheduled instruction(s) to their dedicated places, if not | |||
3227 | // there yet. | |||
3228 | ScheduleData *BundleMember = picked; | |||
3229 | while (BundleMember) { | |||
3230 | Instruction *pickedInst = BundleMember->Inst; | |||
3231 | if (LastScheduledInst->getNextNode() != pickedInst) { | |||
3232 | BS->BB->getInstList().remove(pickedInst); | |||
3233 | BS->BB->getInstList().insert(LastScheduledInst->getIterator(), | |||
3234 | pickedInst); | |||
3235 | } | |||
3236 | LastScheduledInst = pickedInst; | |||
3237 | BundleMember = BundleMember->NextInBundle; | |||
3238 | } | |||
3239 | ||||
3240 | BS->schedule(picked, ReadyInsts); | |||
3241 | NumToSchedule--; | |||
3242 | } | |||
3243 | assert(NumToSchedule == 0 && "could not schedule all instructions"); | |||
3244 | ||||
3245 | // Avoid duplicate scheduling of the block. | |||
3246 | BS->ScheduleStart = nullptr; | |||
3247 | } | |||
3248 | ||||
3249 | unsigned BoUpSLP::getVectorElementSize(Value *V) { | |||
3250 | // If V is a store, just return the width of the stored value without | |||
3251 | // traversing the expression tree. This is the common case. | |||
3252 | if (auto *Store = dyn_cast<StoreInst>(V)) | |||
3253 | return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); | |||
3254 | ||||
3255 | // If V is not a store, we can traverse the expression tree to find loads | |||
3256 | // that feed it. The type of the loaded value may indicate a more suitable | |||
3257 | // width than V's type. We want to base the vector element size on the width | |||
3258 | // of memory operations where possible. | |||
3259 | SmallVector<Instruction *, 16> Worklist; | |||
3260 | SmallPtrSet<Instruction *, 16> Visited; | |||
3261 | if (auto *I = dyn_cast<Instruction>(V)) | |||
3262 | Worklist.push_back(I); | |||
3263 | ||||
3264 | // Traverse the expression tree in bottom-up order looking for loads. If we | |||
3265 | // encounter an instruction we don't yet handle, we give up. | |||
3266 | auto MaxWidth = 0u; | |||
3267 | auto FoundUnknownInst = false; | |||
3268 | while (!Worklist.empty() && !FoundUnknownInst) { | |||
3269 | auto *I = Worklist.pop_back_val(); | |||
3270 | Visited.insert(I); | |||
3271 | ||||
3272 | // We should only be looking at scalar instructions here. If the current | |||
3273 | // instruction has a vector type, give up. | |||
3274 | auto *Ty = I->getType(); | |||
3275 | if (isa<VectorType>(Ty)) | |||
3276 | FoundUnknownInst = true; | |||
3277 | ||||
3278 | // If the current instruction is a load, update MaxWidth to reflect the | |||
3279 | // width of the loaded value. | |||
3280 | else if (isa<LoadInst>(I)) | |||
3281 | MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty)); | |||
3282 | ||||
3283 | // Otherwise, we need to visit the operands of the instruction. We only | |||
3284 | // handle the interesting cases from buildTree here. If an operand is an | |||
3285 | // instruction we haven't yet visited, we add it to the worklist. | |||
3286 | else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || | |||
3287 | isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) { | |||
3288 | for (Use &U : I->operands()) | |||
3289 | if (auto *J = dyn_cast<Instruction>(U.get())) | |||
3290 | if (!Visited.count(J)) | |||
3291 | Worklist.push_back(J); | |||
3292 | } | |||
3293 | ||||
3294 | // If we don't yet handle the instruction, give up. | |||
3295 | else | |||
3296 | FoundUnknownInst = true; | |||
3297 | } | |||
3298 | ||||
3299 | // If we didn't encounter a memory access in the expression tree, or if we | |||
3300 | // gave up for some reason, just return the width of V. | |||
3301 | if (!MaxWidth || FoundUnknownInst) | |||
3302 | return DL->getTypeSizeInBits(V->getType()); | |||
3303 | ||||
3304 | // Otherwise, return the maximum width we found. | |||
3305 | return MaxWidth; | |||
3306 | } | |||
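// Illustrative query (hypothetical IR, not from the original source):
//   %x = load i16, i16* %p
//   %y = sext i16 %x to i32
// getVectorElementSize(%y) walks the sext back to the load and returns 16,
// preferring the width of the memory access over the 32 bits of %y's type.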
3307 | ||||
3308 | // Determine if a value V in a vectorizable expression Expr can be demoted to a | |||
3309 | // smaller type with a truncation. We collect the values that will be demoted | |||
3310 | // in ToDemote and additional roots that require investigating in Roots. | |||
3311 | static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, | |||
3312 | SmallVectorImpl<Value *> &ToDemote, | |||
3313 | SmallVectorImpl<Value *> &Roots) { | |||
3314 | ||||
3315 | // We can always demote constants. | |||
3316 | if (isa<Constant>(V)) { | |||
3317 | ToDemote.push_back(V); | |||
3318 | return true; | |||
3319 | } | |||
3320 | ||||
3321 | // If the value is not an instruction in the expression with only one use, it | |||
3322 | // cannot be demoted. | |||
3323 | auto *I = dyn_cast<Instruction>(V); | |||
3324 | if (!I || !I->hasOneUse() || !Expr.count(I)) | |||
3325 | return false; | |||
3326 | ||||
3327 | switch (I->getOpcode()) { | |||
3328 | ||||
3329 | // We can always demote truncations and extensions. Since truncations can | |||
3330 | // seed additional demotion, we save the truncated value. | |||
3331 | case Instruction::Trunc: | |||
3332 | Roots.push_back(I->getOperand(0)); | |||
3333 | case Instruction::ZExt: | |||
3334 | case Instruction::SExt: | |||
3335 | break; | |||
3336 | ||||
3337 | // We can demote certain binary operations if we can demote both of their | |||
3338 | // operands. | |||
3339 | case Instruction::Add: | |||
3340 | case Instruction::Sub: | |||
3341 | case Instruction::Mul: | |||
3342 | case Instruction::And: | |||
3343 | case Instruction::Or: | |||
3344 | case Instruction::Xor: | |||
3345 | if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || | |||
3346 | !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) | |||
3347 | return false; | |||
3348 | break; | |||
3349 | ||||
3350 | // We can demote selects if we can demote their true and false values. | |||
3351 | case Instruction::Select: { | |||
3352 | SelectInst *SI = cast<SelectInst>(I); | |||
3353 | if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || | |||
3354 | !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) | |||
3355 | return false; | |||
3356 | break; | |||
3357 | } | |||
3358 | ||||
3359 | // We can demote phis if we can demote all their incoming operands. Note that | |||
3360 | // we don't need to worry about cycles since we ensure single use above. | |||
3361 | case Instruction::PHI: { | |||
3362 | PHINode *PN = cast<PHINode>(I); | |||
3363 | for (Value *IncValue : PN->incoming_values()) | |||
3364 | if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) | |||
3365 | return false; | |||
3366 | break; | |||
3367 | } | |||
3368 | ||||
3369 | // Otherwise, conservatively give up. | |||
3370 | default: | |||
3371 | return false; | |||
3372 | } | |||
3373 | ||||
3374 | // Record the value that we can demote. | |||
3375 | ToDemote.push_back(V); | |||
3376 | return true; | |||
3377 | } | |||
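// Illustrative case (assuming %t and %a are in Expr and single-use):
//   %t = trunc i32 %w to i8
//   %a = add i8 %t, 7
// Demoting %a recurses into %t and the constant 7; the trunc pushes %w onto
// Roots as a seed for further demotion, and %t, %a, and the constant all
// land in ToDemote.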
3378 | ||||
3379 | void BoUpSLP::computeMinimumValueSizes() { | |||
3380 | // If there are no external uses, the expression tree must be rooted by a | |||
3381 | // store. We can't demote in-memory values, so there is nothing to do here. | |||
3382 | if (ExternalUses.empty()) | |||
3383 | return; | |||
3384 | ||||
3385 | // We only attempt to truncate integer expressions. | |||
3386 | auto &TreeRoot = VectorizableTree[0].Scalars; | |||
3387 | auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); | |||
3388 | if (!TreeRootIT) | |||
3389 | return; | |||
3390 | ||||
3391 | // If the expression is not rooted by a store, these roots should have | |||
3392 | // external uses. We will rely on InstCombine to rewrite the expression in | |||
3393 | // the narrower type. However, InstCombine only rewrites single-use values. | |||
3394 | // This means that if a tree entry other than a root is used externally, it | |||
3395 | // must have multiple uses and InstCombine will not rewrite it. The code | |||
3396 | // below ensures that only the roots are used externally. | |||
3397 | SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); | |||
3398 | for (auto &EU : ExternalUses) | |||
3399 | if (!Expr.erase(EU.Scalar)) | |||
3400 | return; | |||
3401 | if (!Expr.empty()) | |||
3402 | return; | |||
3403 | ||||
3404 | // Collect the scalar values of the vectorizable expression. We will use this | |||
3405 | // context to determine which values can be demoted. If we see a truncation, | |||
3406 | // we mark it as seeding another demotion. | |||
3407 | for (auto &Entry : VectorizableTree) | |||
3408 | Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end()); | |||
3409 | ||||
3410 | // Ensure the roots of the vectorizable tree don't form a cycle. They must | |||
3411 | // have a single external user that is not in the vectorizable tree. | |||
3412 | for (auto *Root : TreeRoot) | |||
3413 | if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) | |||
3414 | return; | |||
3415 | ||||
3416 | // Conservatively determine if we can actually truncate the roots of the | |||
3417 | // expression. Collect the values that can be demoted in ToDemote and | |||
3418 | // additional roots that require investigating in Roots. | |||
3419 | SmallVector<Value *, 32> ToDemote; | |||
3420 | SmallVector<Value *, 4> Roots; | |||
3421 | for (auto *Root : TreeRoot) | |||
3422 | if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) | |||
3423 | return; | |||
3424 | ||||
3425 | // The maximum bit width required to represent all the values that can be | |||
3426 | // demoted without loss of precision. It would be safe to truncate the roots | |||
3427 | // of the expression to this width. | |||
3428 | auto MaxBitWidth = 8u; | |||
3429 | ||||
3430 | // We first check if all the bits of the roots are demanded. If they're not, | |||
3431 | // we can truncate the roots to this narrower type. | |||
3432 | for (auto *Root : TreeRoot) { | |||
3433 | auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); | |||
3434 | MaxBitWidth = std::max<unsigned>( | |||
3435 | Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); | |||
3436 | } | |||
3437 | ||||
3438 | // If all the bits of the roots are demanded, we can try a little harder to | |||
3439 | // compute a narrower type. This can happen, for example, if the roots are | |||
3440 | // getelementptr indices. InstCombine promotes these indices to the pointer | |||
3441 | // width. Thus, all their bits are technically demanded even though the | |||
3442 | // address computation might be vectorized in a smaller type. | |||
3443 | // | |||
3444 | // We start by looking at each entry that can be demoted. We compute the | |||
3445 | // maximum bit width required to store the scalar by using ValueTracking to | |||
3446 | // compute the number of high-order bits we can truncate. | |||
3447 | if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) { | |||
3448 | MaxBitWidth = 8u; | |||
3449 | for (auto *Scalar : ToDemote) { | |||
3450 | auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT); | |||
3451 | auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); | |||
3452 | MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); | |||
3453 | } | |||
3454 | } | |||
3455 | ||||
3456 | // Round MaxBitWidth up to the next power-of-two. | |||
3457 | if (!isPowerOf2_64(MaxBitWidth)) | |||
3458 | MaxBitWidth = NextPowerOf2(MaxBitWidth); | |||
3459 | ||||
3460 | // If the maximum bit width we compute is less than the width of the roots' | |||
3461 | // type, we can proceed with the narrowing. Otherwise, do nothing. | |||
3462 | if (MaxBitWidth >= TreeRootIT->getBitWidth()) | |||
3463 | return; | |||
3464 | ||||
3465 | // If we can truncate the root, we must collect additional values that might | |||
3466 | // be demoted as a result. That is, those seeded by truncations we will | |||
3467 | // modify. | |||
3468 | while (!Roots.empty()) | |||
3469 | collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots); | |||
3470 | ||||
3471 | // Finally, map the values we can demote to the maximum bit width we computed. | |||
3472 | for (auto *Scalar : ToDemote) | |||
3473 | MinBWs[Scalar] = MaxBitWidth; | |||
3474 | } | |||
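// Worked example (hypothetical, assuming an i32 tree whose roots feed an
// "and" with 255): DemandedBits reports an 8-bit mask, so MaxBitWidth
// becomes 32 - 24 leading zeros = 8; that is below the roots' 32-bit type,
// so every demotable scalar is mapped to 8 bits in MinBWs.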
3475 | ||||
3476 | /// The SLPVectorizer Pass. | |||
3477 | struct SLPVectorizer : public FunctionPass { | |||
3478 | typedef SmallVector<StoreInst *, 8> StoreList; | |||
3479 | typedef MapVector<Value *, StoreList> StoreListMap; | |||
3480 | typedef SmallVector<WeakVH, 8> WeakVHList; | |||
3481 | typedef MapVector<Value *, WeakVHList> WeakVHListMap; | |||
3482 | ||||
3483 | /// Pass identification, replacement for typeid | |||
3484 | static char ID; | |||
3485 | ||||
3486 | explicit SLPVectorizer() : FunctionPass(ID) { | |||
3487 | initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); | |||
3488 | } | |||
3489 | ||||
3490 | ScalarEvolution *SE; | |||
3491 | TargetTransformInfo *TTI; | |||
3492 | TargetLibraryInfo *TLI; | |||
3493 | AliasAnalysis *AA; | |||
3494 | LoopInfo *LI; | |||
3495 | DominatorTree *DT; | |||
3496 | AssumptionCache *AC; | |||
3497 | DemandedBits *DB; | |||
3498 | const DataLayout *DL; | |||
3499 | ||||
3500 | bool doInitialization(Module &M) override { | |||
3501 | DL = &M.getDataLayout(); | |||
3502 | return false; | |||
3503 | } | |||
3504 | ||||
3505 | bool runOnFunction(Function &F) override { | |||
3506 | if (skipFunction(F)) | |||
3507 | return false; | |||
3508 | ||||
3509 | SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); | |||
3510 | TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | |||
3511 | auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); | |||
3512 | TLI = TLIP ? &TLIP->getTLI() : nullptr; | |||
3513 | AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||
3514 | LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | |||
3515 | DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||
3516 | AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | |||
3517 | DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); | |||
3518 | ||||
3519 | Stores.clear(); | |||
3520 | GEPs.clear(); | |||
3521 | bool Changed = false; | |||
3522 | ||||
3523 | // If the target claims to have no vector registers don't attempt | |||
3524 | // vectorization. | |||
3525 | if (!TTI->getNumberOfRegisters(true)) | |||
3526 | return false; | |||
3527 | ||||
3528 | // Don't vectorize when the attribute NoImplicitFloat is used. | |||
3529 | if (F.hasFnAttribute(Attribute::NoImplicitFloat)) | |||
3530 | return false; | |||
3531 | ||||
3532 | DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"; } } while (0); | |||
3533 | ||||
3534 | // Use the bottom up slp vectorizer to construct chains that start with | |||
3535 | // store instructions. | |||
3536 | BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL); | |||
3537 | ||||
3538 | // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to | |||
3539 | // delete instructions. | |||
3540 | ||||
3541 | // Scan the blocks in the function in post order. | |||
3542 | for (auto BB : post_order(&F.getEntryBlock())) { | |||
3543 | collectSeedInstructions(BB); | |||
3544 | ||||
3545 | // Vectorize trees that end at stores. | |||
3546 | if (!Stores.empty()) { | |||
3547 | DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Found stores for " << Stores .size() << " underlying objects.\n"; } } while (0) | |||
3548 | << " underlying objects.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Found stores for " << Stores .size() << " underlying objects.\n"; } } while (0); | |||
3549 | Changed |= vectorizeStoreChains(R); | |||
3550 | } | |||
3551 | ||||
3552 | // Vectorize trees that end at reductions. | |||
3553 | Changed |= vectorizeChainsInBlock(BB, R); | |||
3554 | ||||
3555 | // Vectorize the index computations of getelementptr instructions. This | |||
3556 | // is primarily intended to catch gather-like idioms ending at | |||
3557 | // non-consecutive loads. | |||
3558 | if (!GEPs.empty()) { | |||
3559 | DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Found GEPs for " << GEPs .size() << " underlying objects.\n"; } } while (0) | |||
3560 | << " underlying objects.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Found GEPs for " << GEPs .size() << " underlying objects.\n"; } } while (0); | |||
3561 | Changed |= vectorizeGEPIndices(BB, R); | |||
3562 | } | |||
3563 | } | |||
3564 | ||||
3565 | if (Changed) { | |||
3566 | R.optimizeGatherSequence(); | |||
3567 | DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: vectorized \"" << F.getName () << "\"\n"; } } while (0); | |||
3568 | DEBUG(verifyFunction(F)); | |||
3569 | } | |||
3570 | return Changed; | |||
3571 | } | |||
3572 | ||||
3573 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
3574 | FunctionPass::getAnalysisUsage(AU); | |||
3575 | AU.addRequired<AssumptionCacheTracker>(); | |||
3576 | AU.addRequired<ScalarEvolutionWrapperPass>(); | |||
3577 | AU.addRequired<AAResultsWrapperPass>(); | |||
3578 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
3579 | AU.addRequired<LoopInfoWrapperPass>(); | |||
3580 | AU.addRequired<DominatorTreeWrapperPass>(); | |||
3581 | AU.addRequired<DemandedBitsWrapperPass>(); | |||
3582 | AU.addPreserved<LoopInfoWrapperPass>(); | |||
3583 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
3584 | AU.addPreserved<AAResultsWrapperPass>(); | |||
3585 | AU.addPreserved<GlobalsAAWrapperPass>(); | |||
3586 | AU.setPreservesCFG(); | |||
3587 | } | |||
3588 | ||||
3589 | private: | |||
3590 | /// \brief Collect store and getelementptr instructions and organize them | |||
3591 | /// according to the underlying object of their pointer operands. We sort the | |||
3592 | /// instructions by their underlying objects to reduce the cost of | |||
3593 | /// consecutive access queries. | |||
3594 | /// | |||
3595 | /// TODO: We can further reduce this cost if we flush the chain creation | |||
3596 | /// every time we run into a memory barrier. | |||
3597 | void collectSeedInstructions(BasicBlock *BB); | |||
3598 | ||||
3599 | /// \brief Try to vectorize a chain that starts at two arithmetic instrs. | |||
3600 | bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R); | |||
3601 | ||||
3602 | /// \brief Try to vectorize a list of operands. | |||
3603 | /// \param BuildVector A list of users to ignore for the purpose of | |||
3604 | /// scheduling and that don't need extracting. | |||
3605 | /// \returns true if a value was vectorized. | |||
3606 | bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, | |||
3607 | ArrayRef<Value *> BuildVector = None, | |||
3608 | bool allowReorder = false); | |||
3609 | ||||
3610 | /// \brief Try to vectorize a chain that may start at the operands of \p V. | |||
3611 | bool tryToVectorize(BinaryOperator *V, BoUpSLP &R); | |||
3612 | ||||
3613 | /// \brief Vectorize the store instructions collected in Stores. | |||
3614 | bool vectorizeStoreChains(BoUpSLP &R); | |||
3615 | ||||
3616 | /// \brief Vectorize the index computations of the getelementptr instructions | |||
3617 | /// collected in GEPs. | |||
3618 | bool vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R); | |||
3619 | ||||
3620 | /// \brief Scan the basic block and look for patterns that are likely to start | |||
3621 | /// a vectorization chain. | |||
3622 | bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R); | |||
3623 | ||||
3624 | bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold, | |||
3625 | BoUpSLP &R, unsigned VecRegSize); | |||
3626 | ||||
3627 | bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold, | |||
3628 | BoUpSLP &R); | |||
3629 | ||||
3630 | /// The store instructions in a basic block organized by base pointer. | |||
3631 | StoreListMap Stores; | |||
3632 | ||||
3633 | /// The getelementptr instructions in a basic block organized by base pointer. | |||
3634 | WeakVHListMap GEPs; | |||
3635 | }; | |||
3636 | ||||
3637 | /// \brief Check that the Values in the slice of the VL array still exist in | |||
3638 | /// the WeakVH array. | |||
3639 | /// Vectorization of part of the VL array may cause later values in the VL array | |||
3640 | /// to become invalid. We track when this has happened in the WeakVH array. | |||
3641 | static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH, | |||
3642 | unsigned SliceBegin, unsigned SliceSize) { | |||
3643 | VL = VL.slice(SliceBegin, SliceSize); | |||
3644 | VH = VH.slice(SliceBegin, SliceSize); | |||
3645 | return !std::equal(VL.begin(), VL.end(), VH.begin()); | |||
3646 | } | |||
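// For example (hypothetical): if an earlier iteration vectorized and erased
// VL[2], the corresponding WeakVH in VH[2] has become null, the slice
// comparison fails, and the caller skips that window.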
3647 | ||||
3648 | bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain, | |||
3649 | int CostThreshold, BoUpSLP &R, | |||
3650 | unsigned VecRegSize) { | |||
3651 | unsigned ChainLen = Chain.size(); | |||
3652 | DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLendo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing a store chain of length " << ChainLen << "\n"; } } while (0) | |||
3653 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing a store chain of length " << ChainLen << "\n"; } } while (0); | |||
3654 | unsigned Sz = R.getVectorElementSize(Chain[0]); | |||
3655 | unsigned VF = VecRegSize / Sz; | |||
3656 | ||||
3657 | if (!isPowerOf2_32(Sz) || VF < 2) | |||
3658 | return false; | |||
3659 | ||||
3660 | // Keep track of values that were deleted by vectorizing in the loop below. | |||
3661 | SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end()); | |||
3662 | ||||
3663 | bool Changed = false; | |||
3664 | // Look for profitable vectorizable trees at all offsets, starting at zero. | |||
3665 | for (unsigned i = 0, e = ChainLen; i < e; ++i) { | |||
3666 | if (i + VF > e) | |||
3667 | break; | |||
3668 | ||||
3669 | // Check that a previous iteration of this loop did not delete the Value. | |||
3670 | if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) | |||
3671 | continue; | |||
3672 | ||||
3673 | DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << ido { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i << "\n"; } } while (0) | |||
3674 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i << "\n"; } } while (0); | |||
3675 | ArrayRef<Value *> Operands = Chain.slice(i, VF); | |||
3676 | ||||
3677 | R.buildTree(Operands); | |||
3678 | R.computeMinimumValueSizes(); | |||
3679 | ||||
3680 | int Cost = R.getTreeCost(); | |||
3681 | ||||
3682 | DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"; } } while (0); | |||
3683 | if (Cost < CostThreshold) { | |||
3684 | DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"; } } while (0); | |||
3685 | R.vectorizeTree(); | |||
3686 | ||||
3687 | // Move to the next bundle. | |||
3688 | i += VF - 1; | |||
3689 | Changed = true; | |||
3690 | } | |||
3691 | } | |||
3692 | ||||
3693 | return Changed; | |||
3694 | } | |||
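// For instance (hypothetical numbers): with VecRegSize = 128 and i32 stored
// values, Sz = 32 and VF = 4, so the loop above slides a window of four
// consecutive stores across the chain and vectorizes each profitable window.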
3695 | ||||
3696 | bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores, | |||
3697 | int costThreshold, BoUpSLP &R) { | |||
3698 | SetVector<StoreInst *> Heads, Tails; | |||
3699 | SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain; | |||
3700 | ||||
3701 | // We may run into multiple chains that merge into a single chain. We mark the | |||
3702 | // stores that we vectorized so that we don't visit the same store twice. | |||
3703 | BoUpSLP::ValueSet VectorizedStores; | |||
3704 | bool Changed = false; | |||
3705 | ||||
3706 | // Do a quadratic search on all of the given stores and find | |||
3707 | // all of the pairs of stores that follow each other. | |||
3708 | SmallVector<unsigned, 16> IndexQueue; | |||
3709 | for (unsigned i = 0, e = Stores.size(); i < e; ++i) { | |||
3710 | IndexQueue.clear(); | |||
3711 | // If a store has multiple consecutive store candidates, search the Stores | |||
3712 | // array according to the sequence: from i+1 to e, then from i-1 to 0. This | |||
3713 | // is because pairing with the immediately succeeding or preceding candidate | |||
3714 | // usually creates the best chance to find an SLP vectorization opportunity. | |||
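// For illustration (hypothetical indices): with i = 2 and e = 5, IndexQueue
// is visited in the order {3, 4, 1, 0}.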
3715 | unsigned j = 0; | |||
3716 | for (j = i + 1; j < e; ++j) | |||
3717 | IndexQueue.push_back(j); | |||
3718 | for (j = i; j > 0; --j) | |||
3719 | IndexQueue.push_back(j - 1); | |||
3720 | ||||
3721 | for (auto &k : IndexQueue) { | |||
3722 | if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) { | |||
3723 | Tails.insert(Stores[k]); | |||
3724 | Heads.insert(Stores[i]); | |||
3725 | ConsecutiveChain[Stores[i]] = Stores[k]; | |||
3726 | break; | |||
3727 | } | |||
3728 | } | |||
3729 | } | |||
3730 | ||||
3731 | // For stores that start but don't end a link in the chain: | |||
3732 | for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end(); | |||
3733 | it != e; ++it) { | |||
3734 | if (Tails.count(*it)) | |||
3735 | continue; | |||
3736 | ||||
3737 | // We found a store instr that starts a chain. Now follow the chain and try | |||
3738 | // to vectorize it. | |||
3739 | BoUpSLP::ValueList Operands; | |||
3740 | StoreInst *I = *it; | |||
3741 | // Collect the chain into a list. | |||
3742 | while (Tails.count(I) || Heads.count(I)) { | |||
3743 | if (VectorizedStores.count(I)) | |||
3744 | break; | |||
3745 | Operands.push_back(I); | |||
3746 | // Move to the next value in the chain. | |||
3747 | I = ConsecutiveChain[I]; | |||
3748 | } | |||
3749 | ||||
3750 | // FIXME: Is division-by-2 the correct step? Should we assert that the | |||
3751 | // register size is a power-of-2? | |||
3752 | for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize(); Size /= 2) { | |||
3753 | if (vectorizeStoreChain(Operands, costThreshold, R, Size)) { | |||
3754 | // Mark the vectorized stores so that we don't vectorize them again. | |||
3755 | VectorizedStores.insert(Operands.begin(), Operands.end()); | |||
3756 | Changed = true; | |||
3757 | break; | |||
3758 | } | |||
3759 | } | |||
3760 | } | |||
3761 | ||||
3762 | return Changed; | |||
3763 | } | |||
3764 | ||||
3765 | void SLPVectorizer::collectSeedInstructions(BasicBlock *BB) { | |||
3766 | ||||
3767 | // Initialize the collections. We will make a single pass over the block. | |||
3768 | Stores.clear(); | |||
3769 | GEPs.clear(); | |||
3770 | ||||
3771 | // Visit the store and getelementptr instructions in BB and organize them in | |||
3772 | // Stores and GEPs according to the underlying objects of their pointer | |||
3773 | // operands. | |||
3774 | for (Instruction &I : *BB) { | |||
3775 | ||||
3776 | // Ignore store instructions that are volatile or have a pointer operand | |||
3777 | // that doesn't point to a scalar type. | |||
3778 | if (auto *SI = dyn_cast<StoreInst>(&I)) { | |||
3779 | if (!SI->isSimple()) | |||
3780 | continue; | |||
3781 | if (!isValidElementType(SI->getValueOperand()->getType())) | |||
3782 | continue; | |||
3783 | Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI); | |||
3784 | } | |||
3785 | ||||
3786 | // Ignore getelementptr instructions that have more than one index, a | |||
3787 | // constant index, or a pointer operand that doesn't point to a scalar | |||
3788 | // type. | |||
3789 | else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { | |||
3790 | auto Idx = GEP->idx_begin()->get(); | |||
3791 | if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) | |||
3792 | continue; | |||
3793 | if (!isValidElementType(Idx->getType())) | |||
3794 | continue; | |||
3795 | GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP); | |||
3796 | } | |||
3797 | } | |||
3798 | } | |||
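// As an illustration (hypothetical IR): "store i32 %v, i32* %p" is keyed
// under GetUnderlyingObject(%p), while a GEP such as
// "getelementptr i32, i32* %base, i64 %i" qualifies because it has a single
// non-constant index of a valid element type.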
3799 | ||||
3800 | bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { | |||
3801 | if (!A || !B) | |||
3802 | return false; | |||
3803 | Value *VL[] = { A, B }; | |||
3804 | return tryToVectorizeList(VL, R, None, true); | |||
3805 | } | |||
3806 | ||||
3807 | bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, | |||
3808 | ArrayRef<Value *> BuildVector, | |||
3809 | bool allowReorder) { | |||
3810 | if (VL.size() < 2) | |||
3811 | return false; | |||
3812 | ||||
3813 | DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n"; } } while (0); | |||
3814 | ||||
3815 | // Check that all of the parts are scalar instructions of the same type. | |||
3816 | Instruction *I0 = dyn_cast<Instruction>(VL[0]); | |||
3817 | if (!I0) | |||
3818 | return false; | |||
3819 | ||||
3820 | unsigned Opcode0 = I0->getOpcode(); | |||
3821 | ||||
3822 | // FIXME: Register size should be a parameter to this function, so we can | |||
3823 | // try different vectorization factors. | |||
3824 | unsigned Sz = R.getVectorElementSize(I0); | |||
3825 | unsigned VF = R.getMinVecRegSize() / Sz; | |||
3826 | ||||
3827 | for (Value *V : VL) { | |||
3828 | Type *Ty = V->getType(); | |||
3829 | if (!isValidElementType(Ty)) | |||
3830 | return false; | |||
3831 | Instruction *Inst = dyn_cast<Instruction>(V); | |||
3832 | if (!Inst || Inst->getOpcode() != Opcode0) | |||
3833 | return false; | |||
3834 | } | |||
3835 | ||||
3836 | bool Changed = false; | |||
3837 | ||||
3838 | // Keep track of values that were deleted by vectorizing in the loop below. | |||
3839 | SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end()); | |||
3840 | ||||
3841 | for (unsigned i = 0, e = VL.size(); i < e; ++i) { | |||
3842 | unsigned OpsWidth = 0; | |||
3843 | ||||
3844 | if (i + VF > e) | |||
3845 | OpsWidth = e - i; | |||
3846 | else | |||
3847 | OpsWidth = VF; | |||
3848 | ||||
3849 | if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2) | |||
3850 | break; | |||
3851 | ||||
3852 | // Check that a previous iteration of this loop did not delete the Value. | |||
3853 | if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth)) | |||
3854 | continue; | |||
3855 | ||||
3856 | DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing " << OpsWidth << " operations " << "\n"; } } while (0) | |||
3857 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Analyzing " << OpsWidth << " operations " << "\n"; } } while (0); | |||
3858 | ArrayRef<Value *> Ops = VL.slice(i, OpsWidth); | |||
3859 | ||||
3860 | ArrayRef<Value *> BuildVectorSlice; | |||
3861 | if (!BuildVector.empty()) | |||
3862 | BuildVectorSlice = BuildVector.slice(i, OpsWidth); | |||
3863 | ||||
3864 | R.buildTree(Ops, BuildVectorSlice); | |||
3865 | // TODO: check if we can allow reordering also for other cases than | |||
3866 | // tryToVectorizePair() | |||
3867 | if (allowReorder && R.shouldReorder()) { | |||
3868 | assert(Ops.size() == 2); | |||
3869 | assert(BuildVectorSlice.empty()); | |||
3870 | Value *ReorderedOps[] = { Ops[1], Ops[0] }; | |||
3871 | R.buildTree(ReorderedOps, None); | |||
3872 | } | |||
3873 | R.computeMinimumValueSizes(); | |||
3874 | int Cost = R.getTreeCost(); | |||
3875 | ||||
3876 | if (Cost < -SLPCostThreshold) { | |||
3877 | DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"; } } while (0); | |||
3878 | Value *VectorizedRoot = R.vectorizeTree(); | |||
3879 | ||||
3880 | // Reconstruct the build vector by extracting the vectorized root. This | |||
3881 | // way we handle the case where some elements of the vector are undefined. | |||
3882 | // (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2)) | |||
3883 | if (!BuildVectorSlice.empty()) { | |||
3884 | // The insert point is the last build vector instruction. The vectorized | |||
3885 | // root will precede it. This guarantees that we get an instruction. The | |||
3886 | // vectorized tree could have been constant folded. | |||
3887 | Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back()); | |||
3888 | unsigned VecIdx = 0; | |||
3889 | for (auto &V : BuildVectorSlice) { | |||
3890 | IRBuilder<NoFolder> Builder(InsertAfter->getParent(), | |||
3891 | ++BasicBlock::iterator(InsertAfter)); | |||
3892 | Instruction *I = cast<Instruction>(V); | |||
3893 | assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I)); | |||
3894 | Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement( | |||
3895 | VectorizedRoot, Builder.getInt32(VecIdx++))); | |||
3896 | I->setOperand(1, Extract); | |||
3897 | I->removeFromParent(); | |||
3898 | I->insertAfter(Extract); | |||
3899 | InsertAfter = I; | |||
3900 | } | |||
3901 | } | |||
3902 | // Move to the next bundle. | |||
3903 | i += VF - 1; | |||
3904 | Changed = true; | |||
3905 | } | |||
3906 | } | |||
3907 | ||||
3908 | return Changed; | |||
3909 | } | |||
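// Sketch of the window walk (hypothetical sizes, assuming no window is
// vectorized): with VL.size() = 6 and VF = 4, offsets 0, 1, and 2 are tried
// with width 4; at offset 3 only three operands remain, and the
// non-power-of-two width ends the scan.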
3910 | ||||
3911 | bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) { | |||
3912 | if (!V) | |||
3913 | return false; | |||
3914 | ||||
3915 | // Try to vectorize V. | |||
3916 | if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R)) | |||
3917 | return true; | |||
3918 | ||||
3919 | BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0)); | |||
3920 | BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1)); | |||
3921 | // Try to skip B. | |||
3922 | if (B && B->hasOneUse()) { | |||
3923 | BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); | |||
3924 | BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); | |||
3925 | if (tryToVectorizePair(A, B0, R)) { | |||
3926 | return true; | |||
3927 | } | |||
3928 | if (tryToVectorizePair(A, B1, R)) { | |||
3929 | return true; | |||
3930 | } | |||
3931 | } | |||
3932 | ||||
3933 | // Try to skip A. | |||
3934 | if (A && A->hasOneUse()) { | |||
3935 | BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); | |||
3936 | BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); | |||
3937 | if (tryToVectorizePair(A0, B, R)) { | |||
3938 | return true; | |||
3939 | } | |||
3940 | if (tryToVectorizePair(A1, B, R)) { | |||
3941 | return true; | |||
3942 | } | |||
3943 | } | |||
3944 | return false; | |||
3945 | } | |||
3946 | ||||
3947 | /// \brief Generate a shuffle mask to be used in a reduction tree. | |||
3948 | /// | |||
3949 | /// \param VecLen The length of the vector to be reduced. | |||
3950 | /// \param NumEltsToRdx The number of elements that should be reduced in the | |||
3951 | /// vector. | |||
3952 | /// \param IsPairwise Whether the reduction is a pairwise or splitting | |||
3953 | /// reduction. A pairwise reduction will generate a mask of | |||
3954 | /// <0,2,...> or <1,3,...> while a splitting reduction will generate | |||
3955 | /// <2,3, undef,undef> for a vector of 4 and NumEltsToRdx = 2. | |||
3956 | /// \param IsLeft True will generate a mask of even elements, odd otherwise. | |||
3957 | static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, | |||
3958 | bool IsPairwise, bool IsLeft, | |||
3959 | IRBuilder<> &Builder) { | |||
3960 | assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); | |||
3961 | ||||
3962 | SmallVector<Constant *, 32> ShuffleMask( | |||
3963 | VecLen, UndefValue::get(Builder.getInt32Ty())); | |||
3964 | ||||
3965 | if (IsPairwise) | |||
3966 | // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). | |||
3967 | for (unsigned i = 0; i != NumEltsToRdx; ++i) | |||
3968 | ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); | |||
3969 | else | |||
3970 | // Move the upper half of the vector to the lower half. | |||
3971 | for (unsigned i = 0; i != NumEltsToRdx; ++i) | |||
3972 | ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); | |||
3973 | ||||
3974 | return ConstantVector::get(ShuffleMask); | |||
3975 | } | |||
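// Example masks (VecLen = 4, NumEltsToRdx = 2): a pairwise-left reduction
// yields <0, 2, undef, undef>, pairwise-right yields <1, 3, undef, undef>,
// and a splitting reduction yields <2, 3, undef, undef>.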
3976 | ||||
3977 | ||||
3978 | /// Model horizontal reductions. | |||
3979 | /// | |||
3980 | /// A horizontal reduction is a tree of reduction operations (currently add and | |||
3981 | /// fadd) that has operations that can be put into a vector as its leaf. | |||
3982 | /// For example, this tree: | |||
3983 | /// | |||
3984 | /// mul mul mul mul | |||
3985 | /// \ / \ / | |||
3986 | /// + + | |||
3987 | /// \ / | |||
3988 | /// + | |||
3989 | /// This tree has "mul" as its reduced values and "+" as its reduction | |||
3990 | /// operations. A reduction might be feeding into a store or a binary operation | |||
3991 | /// feeding a phi. | |||
3992 | /// ... | |||
3993 | /// \ / | |||
3994 | /// + | |||
3995 | /// | | |||
3996 | /// phi += | |||
3997 | /// | |||
3998 | /// Or: | |||
3999 | /// ... | |||
4000 | /// \ / | |||
4001 | /// + | |||
4002 | /// | | |||
4003 | /// *p = | |||
4004 | /// | |||
4005 | class HorizontalReduction { | |||
4006 | SmallVector<Value *, 16> ReductionOps; | |||
4007 | SmallVector<Value *, 32> ReducedVals; | |||
4008 | ||||
4009 | BinaryOperator *ReductionRoot; | |||
4010 | PHINode *ReductionPHI; | |||
4011 | ||||
4012 | /// The opcode of the reduction. | |||
4013 | unsigned ReductionOpcode; | |||
4014 | /// The opcode of the values we perform a reduction on. | |||
4015 | unsigned ReducedValueOpcode; | |||
4016 | /// Should we model this reduction as a pairwise reduction tree or a tree that | |||
4017 | /// splits the vector in halves and adds those halves. | |||
4018 | bool IsPairwiseReduction; | |||
4019 | ||||
4020 | public: | |||
4021 | /// The width of one full horizontal reduction operation. | |||
4022 | unsigned ReduxWidth; | |||
4023 | ||||
4024 | /// Minimal width of available vector registers. It's used to determine | |||
4025 | /// ReduxWidth. | |||
4026 | unsigned MinVecRegSize; | |||
4027 | ||||
4028 | HorizontalReduction(unsigned MinVecRegSize) | |||
4029 | : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0), | |||
4030 | ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0), | |||
4031 | MinVecRegSize(MinVecRegSize) {} | |||
4032 | ||||
4033 | /// \brief Try to find a reduction tree. | |||
4034 | bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) { | |||
4035 | assert((!Phi || | |||
4036 | std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) && | |||
4037 | "The phi needs to use the binary operator"); | |||
4038 | ||||
4039 | // We could have an initial reduction that is not an add. | |||
4040 | // r *= v1 + v2 + v3 + v4 | |||
4041 | // In such a case start looking for a tree rooted in the first '+'. | |||
4042 | if (Phi) { | |||
4043 | if (B->getOperand(0) == Phi) { | |||
4044 | Phi = nullptr; | |||
4045 | B = dyn_cast<BinaryOperator>(B->getOperand(1)); | |||
4046 | } else if (B->getOperand(1) == Phi) { | |||
4047 | Phi = nullptr; | |||
4048 | B = dyn_cast<BinaryOperator>(B->getOperand(0)); | |||
4049 | } | |||
4050 | } | |||
4051 | ||||
4052 | if (!B) | |||
4053 | return false; | |||
4054 | ||||
4055 | Type *Ty = B->getType(); | |||
4056 | if (!isValidElementType(Ty)) | |||
4057 | return false; | |||
4058 | ||||
4059 | const DataLayout &DL = B->getModule()->getDataLayout(); | |||
4060 | ReductionOpcode = B->getOpcode(); | |||
4061 | ReducedValueOpcode = 0; | |||
4062 | // FIXME: Register size should be a parameter to this function, so we can | |||
4063 | // try different vectorization factors. | |||
4064 | ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty); | |||
4065 | ReductionRoot = B; | |||
4066 | ReductionPHI = Phi; | |||
4067 | ||||
4068 | if (ReduxWidth < 4) | |||
4069 | return false; | |||
4070 | ||||
4071 | // We currently only support adds. | |||
4072 | if (ReductionOpcode != Instruction::Add && | |||
4073 | ReductionOpcode != Instruction::FAdd) | |||
4074 | return false; | |||
4075 | ||||
4076 | // Post order traverse the reduction tree starting at B. We only handle true | |||
4077 | // trees containing only binary operators or selects. | |||
4078 | SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; | |||
4079 | Stack.push_back(std::make_pair(B, 0)); | |||
4080 | while (!Stack.empty()) { | |||
4081 | Instruction *TreeN = Stack.back().first; | |||
4082 | unsigned EdgeToVist = Stack.back().second++; | |||
4083 | bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode; | |||
4084 | ||||
4085 | // Only handle trees in the current basic block. | |||
4086 | if (TreeN->getParent() != B->getParent()) | |||
4087 | return false; | |||
4088 | ||||
4089 | // Each tree node needs to have one user except for the ultimate | |||
4090 | // reduction. | |||
4091 | if (!TreeN->hasOneUse() && TreeN != B) | |||
4092 | return false; | |||
4093 | ||||
4094 | // Postorder visit. | |||
4095 | if (EdgeToVist == 2 || IsReducedValue) { | |||
4096 | if (IsReducedValue) { | |||
4097 | // Make sure that the opcodes of the operations that we are going to | |||
4098 | // reduce match. | |||
4099 | if (!ReducedValueOpcode) | |||
4100 | ReducedValueOpcode = TreeN->getOpcode(); | |||
4101 | else if (ReducedValueOpcode != TreeN->getOpcode()) | |||
4102 | return false; | |||
4103 | ReducedVals.push_back(TreeN); | |||
4104 | } else { | |||
4105 | // We need to be able to reassociate the adds. | |||
4106 | if (!TreeN->isAssociative()) | |||
4107 | return false; | |||
4108 | ReductionOps.push_back(TreeN); | |||
4109 | } | |||
4110 | // Retract. | |||
4111 | Stack.pop_back(); | |||
4112 | continue; | |||
4113 | } | |||
4114 | ||||
4115 | // Visit left or right. | |||
4116 | Value *NextV = TreeN->getOperand(EdgeToVist); | |||
4117 | // We currently only allow BinaryOperators and SelectInsts as reduction | |||
4118 | // values in our tree. | |||
4119 | if (isa<BinaryOperator>(NextV) || isa<SelectInst>(NextV)) | |||
4120 | Stack.push_back(std::make_pair(cast<Instruction>(NextV), 0)); | |||
4121 | else if (NextV != Phi) | |||
4122 | return false; | |||
4123 | } | |||
4124 | return true; | |||
4125 | } | |||
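// Illustrative match (hypothetical IR, assuming the ReduxWidth >= 4 check
// passes):
//   %s0 = add i32 %m0, %m1
//   %s1 = add i32 %s0, %m2
//   %s2 = add i32 %s1, %m3
// The adds become ReductionOps and the identically-opcoded leaves %m0..%m3
// become ReducedVals.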
4126 | ||||
4127 | /// \brief Attempt to vectorize the tree found by | |||
4128 | /// matchAssociativeReduction. | |||
4129 | bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { | |||
4130 | if (ReducedVals.empty()) | |||
4131 | return false; | |||
4132 | ||||
4133 | unsigned NumReducedVals = ReducedVals.size(); | |||
4134 | if (NumReducedVals < ReduxWidth) | |||
4135 | return false; | |||
4136 | ||||
4137 | Value *VectorizedTree = nullptr; | |||
4138 | IRBuilder<> Builder(ReductionRoot); | |||
4139 | FastMathFlags Unsafe; | |||
4140 | Unsafe.setUnsafeAlgebra(); | |||
4141 | Builder.setFastMathFlags(Unsafe); | |||
4142 | unsigned i = 0; | |||
4143 | ||||
4144 | for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) { | |||
4145 | V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps); | |||
4146 | V.computeMinimumValueSizes(); | |||
4147 | ||||
4148 | // Estimate cost. | |||
4149 | int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]); | |||
4150 | if (Cost >= -SLPCostThreshold) | |||
4151 | break; | |||
4152 | ||||
4153 | DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Costdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost << ". (HorRdx)\n"; } } while (0) | |||
4154 | << ". (HorRdx)\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost << ". (HorRdx)\n"; } } while (0); | |||
4155 | ||||
4156 | // Vectorize a tree. | |||
4157 | DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); | |||
4158 | Value *VectorizedRoot = V.vectorizeTree(); | |||
4159 | ||||
4160 | // Emit a reduction. | |||
4161 | Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder); | |||
4162 | if (VectorizedTree) { | |||
4163 | Builder.SetCurrentDebugLocation(Loc); | |||
4164 | VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, | |||
4165 | ReducedSubTree, "bin.rdx"); | |||
4166 | } else | |||
4167 | VectorizedTree = ReducedSubTree; | |||
4168 | } | |||
4169 | ||||
4170 | if (VectorizedTree) { | |||
4171 | // Finish the reduction. | |||
4172 | for (; i < NumReducedVals; ++i) { | |||
4173 | Builder.SetCurrentDebugLocation( | |||
4174 | cast<Instruction>(ReducedVals[i])->getDebugLoc()); | |||
4175 | VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, | |||
4176 | ReducedVals[i]); | |||
4177 | } | |||
4178 | // Update users. | |||
4179 | if (ReductionPHI) { | |||
4180 | assert(ReductionRoot && "Need a reduction operation"); | |||
4181 | ReductionRoot->setOperand(0, VectorizedTree); | |||
4182 | ReductionRoot->setOperand(1, ReductionPHI); | |||
4183 | } else | |||
4184 | ReductionRoot->replaceAllUsesWith(VectorizedTree); | |||
4185 | } | |||
4186 | return VectorizedTree != nullptr; | |||
4187 | } | |||
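// For example (hypothetical counts, assuming both windows are profitable):
// with NumReducedVals = 10 and ReduxWidth = 4, the main loop emits two
// vector reductions covering eight values, and the finishing loop folds the
// remaining two scalars into the result before users are updated.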
4188 | ||||
4189 | unsigned numReductionValues() const { | |||
4190 | return ReducedVals.size(); | |||
4191 | } | |||
4192 | ||||
4193 | private: | |||
4194 | /// \brief Calculate the cost of a reduction. | |||
4195 | int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) { | |||
4196 | Type *ScalarTy = FirstReducedVal->getType(); | |||
4197 | Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); | |||
4198 | ||||
4199 | int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true); | |||
4200 | int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false); | |||
4201 | ||||
4202 | IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; | |||
4203 | int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; | |||
4204 | ||||
4205 | int ScalarReduxCost = | |||
4206 | ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy); | |||
4207 | ||||
4208 |     DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost | |||
4209 |                  << " for reduction that starts with " << *FirstReducedVal | |||
4210 |                  << " (It is a " | |||
4211 |                  << (IsPairwiseReduction ? "pairwise" : "splitting") | |||
4212 |                  << " reduction)\n"); | |||
4213 | ||||
4214 | return VecReduxCost - ScalarReduxCost; | |||
4215 | } | |||
4216 | ||||
4217 | static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L, | |||
4218 | Value *R, const Twine &Name = "") { | |||
4219 | if (Opcode == Instruction::FAdd) | |||
4220 | return Builder.CreateFAdd(L, R, Name); | |||
4221 | return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name); | |||
4222 | } | |||
4223 | ||||
4224 | /// \brief Emit a horizontal reduction of the vectorized value. | |||
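     |   /// For a splitting reduction with ReduxWidth == 4 this emits a shuffle | |||
     |   /// ladder along the lines of: | |||
     |   ///   %s0 = shufflevector <4 x float> %v, <4 x float> undef, | |||
     |   ///                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef> | |||
     |   ///   %r0 = fadd <4 x float> %v, %s0 | |||
     |   ///   %s1 = shufflevector <4 x float> %r0, <4 x float> undef, | |||
     |   ///                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef> | |||
     |   ///   %r1 = fadd <4 x float> %r0, %s1 | |||
     |   ///   %r  = extractelement <4 x float> %r1, i32 0 | |||
     |   /// A pairwise reduction instead combines adjacent lanes (0+1, 2+3, ...) | |||
     |   /// with a left/right shuffle pair at each step. | |||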
4225 | Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) { | |||
4226 |     assert(VectorizedValue && "Need to have a vectorized tree node"); | |||
4227 |     assert(isPowerOf2_32(ReduxWidth) && | |||
4228 |            "We only handle power-of-two reductions for now"); | |||
4229 | ||||
4230 | Value *TmpVec = VectorizedValue; | |||
4231 | for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { | |||
4232 | if (IsPairwiseReduction) { | |||
4233 | Value *LeftMask = | |||
4234 | createRdxShuffleMask(ReduxWidth, i, true, true, Builder); | |||
4235 | Value *RightMask = | |||
4236 | createRdxShuffleMask(ReduxWidth, i, true, false, Builder); | |||
4237 | ||||
4238 | Value *LeftShuf = Builder.CreateShuffleVector( | |||
4239 | TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); | |||
4240 | Value *RightShuf = Builder.CreateShuffleVector( | |||
4241 | TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), | |||
4242 | "rdx.shuf.r"); | |||
4243 | TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf, | |||
4244 | "bin.rdx"); | |||
4245 | } else { | |||
4246 | Value *UpperHalf = | |||
4247 | createRdxShuffleMask(ReduxWidth, i, false, false, Builder); | |||
4248 | Value *Shuf = Builder.CreateShuffleVector( | |||
4249 | TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf"); | |||
4250 | TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx"); | |||
4251 | } | |||
4252 | } | |||
4253 | ||||
4254 | // The result is in the first element of the vector. | |||
4255 | return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); | |||
4256 | } | |||
4257 | }; | |||
4258 | ||||
4259 | /// \brief Recognize construction of vectors like | |||
4260 | /// %ra = insertelement <4 x float> undef, float %s0, i32 0 | |||
4261 | /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 | |||
4262 | /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 | |||
4263 | /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 | |||
4264 | /// | |||
4265 | /// Returns true if it matches | |||
4266 | /// | |||
4267 | static bool findBuildVector(InsertElementInst *FirstInsertElem, | |||
4268 | SmallVectorImpl<Value *> &BuildVector, | |||
4269 | SmallVectorImpl<Value *> &BuildVectorOpds) { | |||
4270 | if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) | |||
4271 | return false; | |||
4272 | ||||
4273 | InsertElementInst *IE = FirstInsertElem; | |||
4274 | while (true) { | |||
4275 | BuildVector.push_back(IE); | |||
4276 | BuildVectorOpds.push_back(IE->getOperand(1)); | |||
4277 | ||||
4278 | if (IE->use_empty()) | |||
4279 | return false; | |||
4280 | ||||
4281 | InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); | |||
4282 | if (!NextUse) | |||
4283 | return true; | |||
4284 | ||||
4285 |     // If this isn't the final use, make sure the next insertelement is the | |||
4286 |     // only use. It's OK if the final constructed vector is used multiple times. | |||
4287 | if (!IE->hasOneUse()) | |||
4288 | return false; | |||
4289 | ||||
4290 | IE = NextUse; | |||
4291 | } | |||
4292 | ||||
4293 | return false; | |||
4294 | } | |||
4295 | ||||
4296 | /// \brief Like findBuildVector, but looks backward for the construction of an aggregate. | |||
4297 | /// | |||
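     | /// Recognizes chains such as: | |||
     | ///   %ra = insertvalue [4 x float] undef, float %s0, 0 | |||
     | ///   %rb = insertvalue [4 x float] %ra, float %s1, 1 | |||
     | ///   %rc = insertvalue [4 x float] %rb, float %s2, 2 | |||
     | ///   %rd = insertvalue [4 x float] %rc, float %s3, 3 | |||
     | /// | |||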
4298 | /// \return true if it matches. | |||
4299 | static bool findBuildAggregate(InsertValueInst *IV, | |||
4300 | SmallVectorImpl<Value *> &BuildVector, | |||
4301 | SmallVectorImpl<Value *> &BuildVectorOpds) { | |||
4302 | if (!IV->hasOneUse()) | |||
4303 | return false; | |||
4304 | Value *V = IV->getAggregateOperand(); | |||
4305 | if (!isa<UndefValue>(V)) { | |||
4306 | InsertValueInst *I = dyn_cast<InsertValueInst>(V); | |||
4307 | if (!I || !findBuildAggregate(I, BuildVector, BuildVectorOpds)) | |||
4308 | return false; | |||
4309 | } | |||
4310 | BuildVector.push_back(IV); | |||
4311 | BuildVectorOpds.push_back(IV->getInsertedValueOperand()); | |||
4312 | return true; | |||
4313 | } | |||
4314 | ||||
4315 | static bool PhiTypeSorterFunc(Value *V, Value *V2) { | |||
4316 | return V->getType() < V2->getType(); | |||
4317 | } | |||
4318 | ||||
4319 | /// \brief Try and get a reduction value from a phi node. | |||
4320 | /// | |||
4321 | /// Given a phi node \p P in a block \p ParentBB, consider possible reductions | |||
4322 | /// if they come from either \p ParentBB or a containing loop latch. | |||
4323 | /// | |||
4324 | /// \returns A candidate reduction value if possible, or \code nullptr \endcode | |||
4325 | /// if not possible. | |||
4326 | static Value *getReductionValue(const DominatorTree *DT, PHINode *P, | |||
4327 | BasicBlock *ParentBB, LoopInfo *LI) { | |||
4328 | // There are situations where the reduction value is not dominated by the | |||
4329 | // reduction phi. Vectorizing such cases has been reported to cause | |||
4330 | // miscompiles. See PR25787. | |||
4331 |   auto DominatedReduxValue = [&](Value *R) { | |||
4332 |     auto *I = dyn_cast<Instruction>(R); | |||
4333 |     return I && | |||
4334 |            DT->dominates(P->getParent(), I->getParent()); | |||
4335 |   }; | |||
4336 | ||||
4337 | Value *Rdx = nullptr; | |||
4338 | ||||
4339 | // Return the incoming value if it comes from the same BB as the phi node. | |||
4340 | if (P->getIncomingBlock(0) == ParentBB) { | |||
4341 | Rdx = P->getIncomingValue(0); | |||
4342 | } else if (P->getIncomingBlock(1) == ParentBB) { | |||
4343 | Rdx = P->getIncomingValue(1); | |||
4344 | } | |||
4345 | ||||
4346 | if (Rdx && DominatedReduxValue(Rdx)) | |||
4347 | return Rdx; | |||
4348 | ||||
4349 | // Otherwise, check whether we have a loop latch to look at. | |||
4350 | Loop *BBL = LI->getLoopFor(ParentBB); | |||
4351 | if (!BBL) | |||
4352 | return nullptr; | |||
4353 | BasicBlock *BBLatch = BBL->getLoopLatch(); | |||
4354 | if (!BBLatch) | |||
4355 | return nullptr; | |||
4356 | ||||
4357 |   // There is a loop latch; return the incoming value if it comes from | |||
4358 |   // it. This reduction pattern occasionally turns up. | |||
4359 | if (P->getIncomingBlock(0) == BBLatch) { | |||
4360 | Rdx = P->getIncomingValue(0); | |||
4361 | } else if (P->getIncomingBlock(1) == BBLatch) { | |||
4362 | Rdx = P->getIncomingValue(1); | |||
4363 | } | |||
4364 | ||||
4365 | if (Rdx && DominatedReduxValue(Rdx)) | |||
4366 | return Rdx; | |||
4367 | ||||
4368 | return nullptr; | |||
4369 | } | |||
4370 | ||||
4371 | /// \brief Attempt to reduce a horizontal reduction. | |||
4372 | /// If it is legal to match a horizontal reduction feeding | |||
4373 | /// the phi node P with reduction operators BI, then check if it | |||
4374 | /// can be done. | |||
4375 | /// \returns true if a horizontal reduction was matched and reduced. | |||
4376 | /// \returns false if a horizontal reduction was not matched. | |||
4377 | static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI, | |||
4378 | BoUpSLP &R, TargetTransformInfo *TTI, | |||
4379 | unsigned MinRegSize) { | |||
4380 | if (!ShouldVectorizeHor) | |||
4381 | return false; | |||
4382 | ||||
4383 | HorizontalReduction HorRdx(MinRegSize); | |||
4384 | if (!HorRdx.matchAssociativeReduction(P, BI)) | |||
4385 | return false; | |||
4386 | ||||
4387 | // If there is a sufficient number of reduction values, reduce | |||
4388 |   // to a nearby power-of-2. We can safely generate oversized | |||
4389 | // vectors and rely on the backend to split them to legal sizes. | |||
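     |   // For example, 9 reduction values give PowerOf2Floor(9) == 8, so | |||
     |   // ReduxWidth == max(4, 8) == 8. | |||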
4390 | HorRdx.ReduxWidth = | |||
4391 | std::max((uint64_t)4, PowerOf2Floor(HorRdx.numReductionValues())); | |||
4392 | ||||
4393 | return HorRdx.tryToReduce(R, TTI); | |||
4394 | } | |||
4395 | ||||
4396 | bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { | |||
4397 | bool Changed = false; | |||
4398 | SmallVector<Value *, 4> Incoming; | |||
4399 | SmallSet<Value *, 16> VisitedInstrs; | |||
4400 | ||||
4401 | bool HaveVectorizedPhiNodes = true; | |||
4402 | while (HaveVectorizedPhiNodes) { | |||
4403 | HaveVectorizedPhiNodes = false; | |||
4404 | ||||
4405 | // Collect the incoming values from the PHIs. | |||
4406 | Incoming.clear(); | |||
4407 | for (Instruction &I : *BB) { | |||
4408 | PHINode *P = dyn_cast<PHINode>(&I); | |||
4409 | if (!P) | |||
4410 | break; | |||
4411 | ||||
4412 | if (!VisitedInstrs.count(P)) | |||
4413 | Incoming.push_back(P); | |||
4414 | } | |||
4415 | ||||
4416 | // Sort by type. | |||
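     |     // e.g. PHIs of types {i32, float, i32} are regrouped so that both | |||
     |     // i32 PHIs become adjacent and can be tried as a single bundle. | |||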
4417 | std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc); | |||
4418 | ||||
4419 |     // Try to vectorize elements based on their type. | |||
4420 | for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), | |||
4421 | E = Incoming.end(); | |||
4422 | IncIt != E;) { | |||
4423 | ||||
4424 | // Look for the next elements with the same type. | |||
4425 | SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; | |||
4426 | while (SameTypeIt != E && | |||
4427 | (*SameTypeIt)->getType() == (*IncIt)->getType()) { | |||
4428 | VisitedInstrs.insert(*SameTypeIt); | |||
4429 | ++SameTypeIt; | |||
4430 | } | |||
4431 | ||||
4432 | // Try to vectorize them. | |||
4433 | unsigned NumElts = (SameTypeIt - IncIt); | |||
4434 |       DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n"); | |||
4435 | if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) { | |||
4436 |         // Success; start over, because instructions might have been changed. | |||
4437 | HaveVectorizedPhiNodes = true; | |||
4438 | Changed = true; | |||
4439 | break; | |||
4440 | } | |||
4441 | ||||
4442 | // Start over at the next instruction of a different type (or the end). | |||
4443 | IncIt = SameTypeIt; | |||
4444 | } | |||
4445 | } | |||
4446 | ||||
4447 | VisitedInstrs.clear(); | |||
4448 | ||||
4449 | for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) { | |||
4450 |     // We may go through BB multiple times, so skip the ones we have already checked. | |||
4451 | if (!VisitedInstrs.insert(&*it).second) | |||
4452 | continue; | |||
4453 | ||||
4454 | if (isa<DbgInfoIntrinsic>(it)) | |||
4455 | continue; | |||
4456 | ||||
4457 | // Try to vectorize reductions that use PHINodes. | |||
4458 | if (PHINode *P = dyn_cast<PHINode>(it)) { | |||
4459 | // Check that the PHI is a reduction PHI. | |||
4460 | if (P->getNumIncomingValues() != 2) | |||
4461 | return Changed; | |||
4462 | ||||
4463 | Value *Rdx = getReductionValue(DT, P, BB, LI); | |||
4464 | ||||
4465 | // Check if this is a Binary Operator. | |||
4466 | BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx); | |||
4467 | if (!BI) | |||
4468 | continue; | |||
4469 | ||||
4470 | // Try to match and vectorize a horizontal reduction. | |||
4471 | if (canMatchHorizontalReduction(P, BI, R, TTI, R.getMinVecRegSize())) { | |||
4472 | Changed = true; | |||
4473 | it = BB->begin(); | |||
4474 | e = BB->end(); | |||
4475 | continue; | |||
4476 | } | |||
4477 | ||||
4478 | Value *Inst = BI->getOperand(0); | |||
4479 | if (Inst == P) | |||
4480 | Inst = BI->getOperand(1); | |||
4481 | ||||
4482 | if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) { | |||
4483 |         // We would like to start over, since some instructions are deleted | |||
4484 |         // and the iterator may become invalid. | |||
4485 | Changed = true; | |||
4486 | it = BB->begin(); | |||
4487 | e = BB->end(); | |||
4488 | continue; | |||
4489 | } | |||
4490 | ||||
4491 | continue; | |||
4492 | } | |||
4493 | ||||
4494 | if (ShouldStartVectorizeHorAtStore) | |||
4495 | if (StoreInst *SI = dyn_cast<StoreInst>(it)) | |||
4496 | if (BinaryOperator *BinOp = | |||
4497 | dyn_cast<BinaryOperator>(SI->getValueOperand())) { | |||
4498 | if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI, | |||
4499 | R.getMinVecRegSize()) || | |||
4500 | tryToVectorize(BinOp, R)) { | |||
4501 | Changed = true; | |||
4502 | it = BB->begin(); | |||
4503 | e = BB->end(); | |||
4504 | continue; | |||
4505 | } | |||
4506 | } | |||
4507 | ||||
4508 | // Try to vectorize horizontal reductions feeding into a return. | |||
4509 | if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) | |||
4510 | if (RI->getNumOperands() != 0) | |||
4511 | if (BinaryOperator *BinOp = | |||
4512 | dyn_cast<BinaryOperator>(RI->getOperand(0))) { | |||
4513 |           DEBUG(dbgs() << "SLP: Found a return to vectorize.\n"); | |||
4514 | if (tryToVectorizePair(BinOp->getOperand(0), | |||
4515 | BinOp->getOperand(1), R)) { | |||
4516 | Changed = true; | |||
4517 | it = BB->begin(); | |||
4518 | e = BB->end(); | |||
4519 | continue; | |||
4520 | } | |||
4521 | } | |||
4522 | ||||
4523 | // Try to vectorize trees that start at compare instructions. | |||
4524 | if (CmpInst *CI = dyn_cast<CmpInst>(it)) { | |||
4525 | if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) { | |||
4526 | Changed = true; | |||
4527 |         // We would like to start over, since some instructions are deleted | |||
4528 |         // and the iterator may become invalid. | |||
4529 | it = BB->begin(); | |||
4530 | e = BB->end(); | |||
4531 | continue; | |||
4532 | } | |||
4533 | ||||
4534 | for (int i = 0; i < 2; ++i) { | |||
4535 | if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) { | |||
4536 | if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) { | |||
4537 | Changed = true; | |||
4538 |             // We would like to start over, since some instructions are deleted | |||
4539 |             // and the iterator may become invalid. | |||
4540 | it = BB->begin(); | |||
4541 | e = BB->end(); | |||
4542 | break; | |||
4543 | } | |||
4544 | } | |||
4545 | } | |||
4546 | continue; | |||
4547 | } | |||
4548 | ||||
4549 | // Try to vectorize trees that start at insertelement instructions. | |||
4550 | if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) { | |||
4551 | SmallVector<Value *, 16> BuildVector; | |||
4552 | SmallVector<Value *, 16> BuildVectorOpds; | |||
4553 | if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds)) | |||
4554 | continue; | |||
4555 | ||||
4556 |       // Vectorize starting with the build vector operands, ignoring the | |||
4557 |       // BuildVector instructions for the purposes of scheduling and user | |||
4558 |       // extraction. | |||
4559 | if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) { | |||
4560 | Changed = true; | |||
4561 | it = BB->begin(); | |||
4562 | e = BB->end(); | |||
4563 | } | |||
4564 | ||||
4565 | continue; | |||
4566 | } | |||
4567 | ||||
4568 | // Try to vectorize trees that start at insertvalue instructions feeding into | |||
4569 | // a store. | |||
4570 | if (StoreInst *SI = dyn_cast<StoreInst>(it)) { | |||
4571 | if (InsertValueInst *LastInsertValue = dyn_cast<InsertValueInst>(SI->getValueOperand())) { | |||
4572 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
4573 | if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) { | |||
4574 | SmallVector<Value *, 16> BuildVector; | |||
4575 | SmallVector<Value *, 16> BuildVectorOpds; | |||
4576 | if (!findBuildAggregate(LastInsertValue, BuildVector, BuildVectorOpds)) | |||
4577 | continue; | |||
4578 | ||||
4579 |           DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI << "\n"); | |||
4580 | if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) { | |||
4581 | Changed = true; | |||
4582 | it = BB->begin(); | |||
4583 | e = BB->end(); | |||
4584 | } | |||
4585 | continue; | |||
4586 | } | |||
4587 | } | |||
4588 | } | |||
4589 | } | |||
4590 | ||||
4591 | return Changed; | |||
4592 | } | |||
4593 | ||||
4594 | bool SLPVectorizer::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { | |||
4595 | auto Changed = false; | |||
4596 | for (auto &Entry : GEPs) { | |||
4597 | ||||
4598 | // If the getelementptr list has fewer than two elements, there's nothing | |||
4599 | // to do. | |||
4600 | if (Entry.second.size() < 2) | |||
4601 | continue; | |||
4602 | ||||
4603 |     DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " | |||
4604 |                  << Entry.second.size() << ".\n"); | |||
4605 | ||||
4606 | // We process the getelementptr list in chunks of 16 (like we do for | |||
4607 | // stores) to minimize compile-time. | |||
4608 | for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) { | |||
4609 | auto Len = std::min<unsigned>(BE - BI, 16); | |||
4610 | auto GEPList = makeArrayRef(&Entry.second[BI], Len); | |||
4611 | ||||
4612 |       // Initialize a set of candidate getelementptrs. Note that we use a | |||
4613 | // SetVector here to preserve program order. If the index computations | |||
4614 | // are vectorizable and begin with loads, we want to minimize the chance | |||
4615 | // of having to reorder them later. | |||
4616 | SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); | |||
4617 | ||||
4618 | // Some of the candidates may have already been vectorized after we | |||
4619 | // initially collected them. If so, the WeakVHs will have nullified the | |||
4620 | // values, so remove them from the set of candidates. | |||
4621 | Candidates.remove(nullptr); | |||
4622 | ||||
4623 | // Remove from the set of candidates all pairs of getelementptrs with | |||
4624 | // constant differences. Such getelementptrs are likely not good | |||
4625 | // candidates for vectorization in a bottom-up phase since one can be | |||
4626 | // computed from the other. We also ensure all candidate getelementptr | |||
4627 | // indices are unique. | |||
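     |       // e.g. for %a = getelementptr %base, 1 and %b = getelementptr %base, 2, | |||
     |       // the SCEV difference is a constant, so both are removed below. | |||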
4628 | for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { | |||
4629 | auto *GEPI = cast<GetElementPtrInst>(GEPList[I]); | |||
4630 | if (!Candidates.count(GEPI)) | |||
4631 | continue; | |||
4632 | auto *SCEVI = SE->getSCEV(GEPList[I]); | |||
4633 | for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { | |||
4634 | auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]); | |||
4635 | auto *SCEVJ = SE->getSCEV(GEPList[J]); | |||
4636 | if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { | |||
4637 | Candidates.remove(GEPList[I]); | |||
4638 | Candidates.remove(GEPList[J]); | |||
4639 | } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { | |||
4640 | Candidates.remove(GEPList[J]); | |||
4641 | } | |||
4642 | } | |||
4643 | } | |||
4644 | ||||
4645 | // We break out of the above computation as soon as we know there are | |||
4646 | // fewer than two candidates remaining. | |||
4647 | if (Candidates.size() < 2) | |||
4648 | continue; | |||
4649 | ||||
4650 | // Add the single, non-constant index of each candidate to the bundle. We | |||
4651 | // ensured the indices met these constraints when we originally collected | |||
4652 | // the getelementptrs. | |||
4653 | SmallVector<Value *, 16> Bundle(Candidates.size()); | |||
4654 | auto BundleIndex = 0u; | |||
4655 | for (auto *V : Candidates) { | |||
4656 | auto *GEP = cast<GetElementPtrInst>(V); | |||
4657 | auto *GEPIdx = GEP->idx_begin()->get(); | |||
4658 |         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); | |||
4659 | Bundle[BundleIndex++] = GEPIdx; | |||
4660 | } | |||
4661 | ||||
4662 | // Try and vectorize the indices. We are currently only interested in | |||
4663 | // gather-like cases of the form: | |||
4664 | // | |||
4665 | // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... | |||
4666 | // | |||
4667 | // where the loads of "a", the loads of "b", and the subtractions can be | |||
4668 | // performed in parallel. It's likely that detecting this pattern in a | |||
4669 | // bottom-up phase will be simpler and less costly than building a | |||
4670 | // full-blown top-down phase beginning at the consecutive loads. | |||
4671 | Changed |= tryToVectorizeList(Bundle, R); | |||
4672 | } | |||
4673 | } | |||
4674 | return Changed; | |||
4675 | } | |||
4676 | ||||
4677 | bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) { | |||
4678 | bool Changed = false; | |||
4679 | // Attempt to sort and vectorize each of the store-groups. | |||
4680 | for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e; | |||
4681 | ++it) { | |||
4682 | if (it->second.size() < 2) | |||
4683 | continue; | |||
4684 | ||||
4685 |     DEBUG(dbgs() << "SLP: Analyzing a store chain of length " | |||
4686 |                  << it->second.size() << ".\n"); | |||
4687 | ||||
4688 | // Process the stores in chunks of 16. | |||
4689 | // TODO: The limit of 16 inhibits greater vectorization factors. | |||
4690 | // For example, AVX2 supports v32i8. Increasing this limit, however, | |||
4691 | // may cause a significant compile-time increase. | |||
4692 | for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) { | |||
4693 | unsigned Len = std::min<unsigned>(CE - CI, 16); | |||
4694 | Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), | |||
4695 | -SLPCostThreshold, R); | |||
4696 | } | |||
4697 | } | |||
4698 | return Changed; | |||
4699 | } | |||
4700 | ||||
4701 | } // end anonymous namespace | |||
4702 | ||||
4703 | char SLPVectorizer::ID = 0; | |||
4704 | static const char lv_name[] = "SLP Vectorizer"; | |||
4705 | INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) | |||
4706 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | |||
4707 | INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) | |||
4708 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) | |||
4709 | INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) | |||
4710 | INITIALIZE_PASS_DEPENDENCY(LoopSimplify) | |||
4711 | INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) | |||
4712 | INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) | |||
4713 | ||||
4714 | namespace llvm { | |||
4715 | Pass *createSLPVectorizerPass() { return new SLPVectorizer(); } | |||
4716 | } |