File: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Warning: line 4803, column 22: Forming reference to null pointer
1 | //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops | ||||
10 | // and generates target-independent LLVM-IR. | ||||
11 | // The vectorizer uses the TargetTransformInfo analysis to estimate the costs | ||||
12 | // of instructions in order to estimate the profitability of vectorization. | ||||
13 | // | ||||
14 | // The loop vectorizer combines consecutive loop iterations into a single | ||||
15 | // 'wide' iteration. After this transformation the index is incremented | ||||
16 | // by the SIMD vector width, and not by one. | ||||
17 | // | ||||
18 | // This pass has four parts: | ||||
19 | // 1. The main loop pass that drives the different parts. | ||||
20 | // 2. LoopVectorizationLegality - A unit that checks for the legality | ||||
21 | // of the vectorization. | ||||
22 | // 3. InnerLoopVectorizer - A unit that performs the actual | ||||
23 | // widening of instructions. | ||||
24 | // 4. LoopVectorizationCostModel - A unit that checks for the profitability | ||||
25 | // of vectorization. It decides on the optimal vector width, which | ||||
26 | // can be one, if vectorization is not profitable. | ||||
27 | // | ||||
28 | // There is a development effort going on to migrate the loop vectorizer to the | ||||
29 | // VPlan infrastructure and to introduce outer loop vectorization support (see | ||||
30 | // docs/Proposal/VectorizationPlan.rst and | ||||
31 | // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this | ||||
32 | // purpose, we temporarily introduced the VPlan-native vectorization path: an | ||||
33 | // alternative vectorization path that is natively implemented on top of the | ||||
34 | // VPlan infrastructure. See EnableVPlanNativePath for enabling. | ||||
35 | // | ||||
36 | //===----------------------------------------------------------------------===// | ||||
37 | // | ||||
38 | // The reduction-variable vectorization is based on the paper: | ||||
39 | // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. | ||||
40 | // | ||||
41 | // Variable uniformity checks are inspired by: | ||||
42 | // Karrenberg, R. and Hack, S. Whole Function Vectorization. | ||||
43 | // | ||||
44 | // The interleaved access vectorization is based on the paper: | ||||
45 | // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved | ||||
46 | // Data for SIMD | ||||
47 | // | ||||
48 | // Other ideas/concepts are from: | ||||
49 | // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. | ||||
50 | // | ||||
51 | // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of | ||||
52 | // Vectorizing Compilers. | ||||
53 | // | ||||
54 | //===----------------------------------------------------------------------===// | ||||
55 | |||||
56 | #include "llvm/Transforms/Vectorize/LoopVectorize.h" | ||||
57 | #include "LoopVectorizationPlanner.h" | ||||
58 | #include "VPRecipeBuilder.h" | ||||
59 | #include "VPlan.h" | ||||
60 | #include "VPlanHCFGBuilder.h" | ||||
61 | #include "VPlanPredicator.h" | ||||
62 | #include "VPlanTransforms.h" | ||||
63 | #include "llvm/ADT/APInt.h" | ||||
64 | #include "llvm/ADT/ArrayRef.h" | ||||
65 | #include "llvm/ADT/DenseMap.h" | ||||
66 | #include "llvm/ADT/DenseMapInfo.h" | ||||
67 | #include "llvm/ADT/Hashing.h" | ||||
68 | #include "llvm/ADT/MapVector.h" | ||||
69 | #include "llvm/ADT/None.h" | ||||
70 | #include "llvm/ADT/Optional.h" | ||||
71 | #include "llvm/ADT/STLExtras.h" | ||||
72 | #include "llvm/ADT/SmallPtrSet.h" | ||||
73 | #include "llvm/ADT/SmallSet.h" | ||||
74 | #include "llvm/ADT/SmallVector.h" | ||||
75 | #include "llvm/ADT/Statistic.h" | ||||
76 | #include "llvm/ADT/StringRef.h" | ||||
77 | #include "llvm/ADT/Twine.h" | ||||
78 | #include "llvm/ADT/iterator_range.h" | ||||
79 | #include "llvm/Analysis/AssumptionCache.h" | ||||
80 | #include "llvm/Analysis/BasicAliasAnalysis.h" | ||||
81 | #include "llvm/Analysis/BlockFrequencyInfo.h" | ||||
82 | #include "llvm/Analysis/CFG.h" | ||||
83 | #include "llvm/Analysis/CodeMetrics.h" | ||||
84 | #include "llvm/Analysis/DemandedBits.h" | ||||
85 | #include "llvm/Analysis/GlobalsModRef.h" | ||||
86 | #include "llvm/Analysis/LoopAccessAnalysis.h" | ||||
87 | #include "llvm/Analysis/LoopAnalysisManager.h" | ||||
88 | #include "llvm/Analysis/LoopInfo.h" | ||||
89 | #include "llvm/Analysis/LoopIterator.h" | ||||
90 | #include "llvm/Analysis/MemorySSA.h" | ||||
91 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | ||||
92 | #include "llvm/Analysis/ProfileSummaryInfo.h" | ||||
93 | #include "llvm/Analysis/ScalarEvolution.h" | ||||
94 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | ||||
95 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||
96 | #include "llvm/Analysis/TargetTransformInfo.h" | ||||
97 | #include "llvm/Analysis/VectorUtils.h" | ||||
98 | #include "llvm/IR/Attributes.h" | ||||
99 | #include "llvm/IR/BasicBlock.h" | ||||
100 | #include "llvm/IR/CFG.h" | ||||
101 | #include "llvm/IR/Constant.h" | ||||
102 | #include "llvm/IR/Constants.h" | ||||
103 | #include "llvm/IR/DataLayout.h" | ||||
104 | #include "llvm/IR/DebugInfoMetadata.h" | ||||
105 | #include "llvm/IR/DebugLoc.h" | ||||
106 | #include "llvm/IR/DerivedTypes.h" | ||||
107 | #include "llvm/IR/DiagnosticInfo.h" | ||||
108 | #include "llvm/IR/Dominators.h" | ||||
109 | #include "llvm/IR/Function.h" | ||||
110 | #include "llvm/IR/IRBuilder.h" | ||||
111 | #include "llvm/IR/InstrTypes.h" | ||||
112 | #include "llvm/IR/Instruction.h" | ||||
113 | #include "llvm/IR/Instructions.h" | ||||
114 | #include "llvm/IR/IntrinsicInst.h" | ||||
115 | #include "llvm/IR/Intrinsics.h" | ||||
116 | #include "llvm/IR/LLVMContext.h" | ||||
117 | #include "llvm/IR/Metadata.h" | ||||
118 | #include "llvm/IR/Module.h" | ||||
119 | #include "llvm/IR/Operator.h" | ||||
120 | #include "llvm/IR/PatternMatch.h" | ||||
121 | #include "llvm/IR/Type.h" | ||||
122 | #include "llvm/IR/Use.h" | ||||
123 | #include "llvm/IR/User.h" | ||||
124 | #include "llvm/IR/Value.h" | ||||
125 | #include "llvm/IR/ValueHandle.h" | ||||
126 | #include "llvm/IR/Verifier.h" | ||||
127 | #include "llvm/InitializePasses.h" | ||||
128 | #include "llvm/Pass.h" | ||||
129 | #include "llvm/Support/Casting.h" | ||||
130 | #include "llvm/Support/CommandLine.h" | ||||
131 | #include "llvm/Support/Compiler.h" | ||||
132 | #include "llvm/Support/Debug.h" | ||||
133 | #include "llvm/Support/ErrorHandling.h" | ||||
134 | #include "llvm/Support/InstructionCost.h" | ||||
135 | #include "llvm/Support/MathExtras.h" | ||||
136 | #include "llvm/Support/raw_ostream.h" | ||||
137 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | ||||
138 | #include "llvm/Transforms/Utils/InjectTLIMappings.h" | ||||
139 | #include "llvm/Transforms/Utils/LoopSimplify.h" | ||||
140 | #include "llvm/Transforms/Utils/LoopUtils.h" | ||||
141 | #include "llvm/Transforms/Utils/LoopVersioning.h" | ||||
142 | #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" | ||||
143 | #include "llvm/Transforms/Utils/SizeOpts.h" | ||||
144 | #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" | ||||
145 | #include <algorithm> | ||||
146 | #include <cassert> | ||||
147 | #include <cstdint> | ||||
148 | #include <cstdlib> | ||||
149 | #include <functional> | ||||
150 | #include <iterator> | ||||
151 | #include <limits> | ||||
152 | #include <memory> | ||||
153 | #include <string> | ||||
154 | #include <tuple> | ||||
155 | #include <utility> | ||||
156 | |||||
157 | using namespace llvm; | ||||
158 | |||||
159 | #define LV_NAME "loop-vectorize" | ||||
160 | #define DEBUG_TYPE LV_NAME | ||||
161 | |||||
162 | #ifndef NDEBUG | ||||
163 | const char VerboseDebug[] = DEBUG_TYPE "-verbose"; | ||||
164 | #endif | ||||
165 | |||||
166 | /// @{ | ||||
167 | /// Metadata attribute names | ||||
168 | const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; | ||||
169 | const char LLVMLoopVectorizeFollowupVectorized[] = | ||||
170 | "llvm.loop.vectorize.followup_vectorized"; | ||||
171 | const char LLVMLoopVectorizeFollowupEpilogue[] = | ||||
172 | "llvm.loop.vectorize.followup_epilogue"; | ||||
173 | /// @} | ||||
174 | |||||
175 | STATISTIC(LoopsVectorized, "Number of loops vectorized"); | ||||
176 | STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); | ||||
177 | STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); | ||||
178 | |||||
179 | static cl::opt<bool> EnableEpilogueVectorization( | ||||
180 | "enable-epilogue-vectorization", cl::init(true), cl::Hidden, | ||||
181 | cl::desc("Enable vectorization of epilogue loops.")); | ||||
182 | |||||
183 | static cl::opt<unsigned> EpilogueVectorizationForceVF( | ||||
184 | "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, | ||||
185 | cl::desc("When epilogue vectorization is enabled, and a value greater than " | ||||
186 | "1 is specified, forces the given VF for all applicable epilogue " | ||||
187 | "loops.")); | ||||
188 | |||||
189 | static cl::opt<unsigned> EpilogueVectorizationMinVF( | ||||
190 | "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, | ||||
191 | cl::desc("Only loops with vectorization factor equal to or larger than " | ||||
192 | "the specified value are considered for epilogue vectorization.")); | ||||
193 | |||||
194 | /// Loops with a known constant trip count below this number are vectorized only | ||||
195 | /// if no scalar iteration overheads are incurred. | ||||
196 | static cl::opt<unsigned> TinyTripCountVectorThreshold( | ||||
197 | "vectorizer-min-trip-count", cl::init(16), cl::Hidden, | ||||
198 | cl::desc("Loops with a constant trip count that is smaller than this " | ||||
199 | "value are vectorized only if no scalar iteration overheads " | ||||
200 | "are incurred.")); | ||||
201 | |||||
202 | static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( | ||||
203 | "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, | ||||
204 | cl::desc("The maximum allowed number of runtime memory checks with a " | ||||
205 | "vectorize(enable) pragma.")); | ||||
206 | |||||
207 | // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, | ||||
208 | // that predication is preferred, and the enum below lists the options. I.e., the | ||||
209 | // vectorizer will try to fold the tail-loop (epilogue) into the vector body | ||||
210 | // and predicate the instructions accordingly. If tail-folding fails, there are | ||||
211 | // different fallback strategies depending on these values: | ||||
212 | namespace PreferPredicateTy { | ||||
213 | enum Option { | ||||
214 | ScalarEpilogue = 0, | ||||
215 | PredicateElseScalarEpilogue, | ||||
216 | PredicateOrDontVectorize | ||||
217 | }; | ||||
218 | } // namespace PreferPredicateTy | ||||
219 | |||||
220 | static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue( | ||||
221 | "prefer-predicate-over-epilogue", | ||||
222 | cl::init(PreferPredicateTy::ScalarEpilogue), | ||||
223 | cl::Hidden, | ||||
224 | cl::desc("Tail-folding and predication preferences over creating a scalar " | ||||
225 | "epilogue loop."), | ||||
226 | cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, | ||||
227 | "scalar-epilogue", | ||||
228 | "Don't tail-predicate loops, create scalar epilogue"), | ||||
229 | clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, | ||||
230 | "predicate-else-scalar-epilogue", | ||||
231 | "prefer tail-folding, create scalar epilogue if tail " | ||||
232 | "folding fails."), | ||||
233 | clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, | ||||
234 | "predicate-dont-vectorize", | ||||
235 | "prefers tail-folding, don't attempt vectorization if " | ||||
236 | "tail-folding fails."))); | ||||
237 | |||||
238 | static cl::opt<bool> MaximizeBandwidth( | ||||
239 | "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, | ||||
240 | cl::desc("Maximize bandwidth when selecting vectorization factor which " | ||||
241 | "will be determined by the smallest type in loop.")); | ||||
242 | |||||
243 | static cl::opt<bool> EnableInterleavedMemAccesses( | ||||
244 | "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, | ||||
245 | cl::desc("Enable vectorization on interleaved memory accesses in a loop")); | ||||
246 | |||||
247 | /// An interleave-group may need masking if it resides in a block that needs | ||||
248 | /// predication, or in order to mask away gaps. | ||||
249 | static cl::opt<bool> EnableMaskedInterleavedMemAccesses( | ||||
250 | "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, | ||||
251 | cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); | ||||
252 | |||||
253 | static cl::opt<unsigned> TinyTripCountInterleaveThreshold( | ||||
254 | "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden, | ||||
255 | cl::desc("We don't interleave loops with a estimated constant trip count " | ||||
256 | "below this number")); | ||||
257 | |||||
258 | static cl::opt<unsigned> ForceTargetNumScalarRegs( | ||||
259 | "force-target-num-scalar-regs", cl::init(0), cl::Hidden, | ||||
260 | cl::desc("A flag that overrides the target's number of scalar registers.")); | ||||
261 | |||||
262 | static cl::opt<unsigned> ForceTargetNumVectorRegs( | ||||
263 | "force-target-num-vector-regs", cl::init(0), cl::Hidden, | ||||
264 | cl::desc("A flag that overrides the target's number of vector registers.")); | ||||
265 | |||||
266 | static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( | ||||
267 | "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, | ||||
268 | cl::desc("A flag that overrides the target's max interleave factor for " | ||||
269 | "scalar loops.")); | ||||
270 | |||||
271 | static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( | ||||
272 | "force-target-max-vector-interleave", cl::init(0), cl::Hidden, | ||||
273 | cl::desc("A flag that overrides the target's max interleave factor for " | ||||
274 | "vectorized loops.")); | ||||
275 | |||||
276 | static cl::opt<unsigned> ForceTargetInstructionCost( | ||||
277 | "force-target-instruction-cost", cl::init(0), cl::Hidden, | ||||
278 | cl::desc("A flag that overrides the target's expected cost for " | ||||
279 | "an instruction to a single constant value. Mostly " | ||||
280 | "useful for getting consistent testing.")); | ||||
281 | |||||
282 | static cl::opt<bool> ForceTargetSupportsScalableVectors( | ||||
283 | "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, | ||||
284 | cl::desc( | ||||
285 | "Pretend that scalable vectors are supported, even if the target does " | ||||
286 | "not support them. This flag should only be used for testing.")); | ||||
287 | |||||
288 | static cl::opt<unsigned> SmallLoopCost( | ||||
289 | "small-loop-cost", cl::init(20), cl::Hidden, | ||||
290 | cl::desc( | ||||
291 | "The cost of a loop that is considered 'small' by the interleaver.")); | ||||
292 | |||||
293 | static cl::opt<bool> LoopVectorizeWithBlockFrequency( | ||||
294 | "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, | ||||
295 | cl::desc("Enable the use of the block frequency analysis to access PGO " | ||||
296 | "heuristics minimizing code growth in cold regions and being more " | ||||
297 | "aggressive in hot regions.")); | ||||
298 | |||||
299 | // Runtime interleave loops for load/store throughput. | ||||
300 | static cl::opt<bool> EnableLoadStoreRuntimeInterleave( | ||||
301 | "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, | ||||
302 | cl::desc( | ||||
303 | "Enable runtime interleaving until load/store ports are saturated")); | ||||
304 | |||||
305 | /// Interleave small loops with scalar reductions. | ||||
306 | static cl::opt<bool> InterleaveSmallLoopScalarReduction( | ||||
307 | "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, | ||||
308 | cl::desc("Enable interleaving for loops with small iteration counts that " | ||||
309 | "contain scalar reductions to expose ILP.")); | ||||
310 | |||||
311 | /// The number of stores in a loop that are allowed to need predication. | ||||
312 | static cl::opt<unsigned> NumberOfStoresToPredicate( | ||||
313 | "vectorize-num-stores-pred", cl::init(1), cl::Hidden, | ||||
314 | cl::desc("Max number of stores to be predicated behind an if.")); | ||||
315 | |||||
316 | static cl::opt<bool> EnableIndVarRegisterHeur( | ||||
317 | "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, | ||||
318 | cl::desc("Count the induction variable only once when interleaving")); | ||||
319 | |||||
320 | static cl::opt<bool> EnableCondStoresVectorization( | ||||
321 | "enable-cond-stores-vec", cl::init(true), cl::Hidden, | ||||
322 | cl::desc("Enable if predication of stores during vectorization.")); | ||||
323 | |||||
324 | static cl::opt<unsigned> MaxNestedScalarReductionIC( | ||||
325 | "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, | ||||
326 | cl::desc("The maximum interleave count to use when interleaving a scalar " | ||||
327 | "reduction in a nested loop.")); | ||||
328 | |||||
329 | static cl::opt<bool> | ||||
330 | PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), | ||||
331 | cl::Hidden, | ||||
332 | cl::desc("Prefer in-loop vector reductions, " | ||||
333 | "overriding the targets preference.")); | ||||
334 | |||||
335 | cl::opt<bool> EnableStrictReductions( | ||||
336 | "enable-strict-reductions", cl::init(false), cl::Hidden, | ||||
337 | cl::desc("Enable the vectorisation of loops with in-order (strict) " | ||||
338 | "FP reductions")); | ||||
339 | |||||
340 | static cl::opt<bool> PreferPredicatedReductionSelect( | ||||
341 | "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, | ||||
342 | cl::desc( | ||||
343 | "Prefer predicating a reduction operation over an after loop select.")); | ||||
344 | |||||
345 | cl::opt<bool> EnableVPlanNativePath( | ||||
346 | "enable-vplan-native-path", cl::init(false), cl::Hidden, | ||||
347 | cl::desc("Enable VPlan-native vectorization path with " | ||||
348 | "support for outer loop vectorization.")); | ||||
349 | |||||
350 | // FIXME: Remove this switch once we have divergence analysis. Currently we | ||||
351 | // assume divergent non-backedge branches when this switch is true. | ||||
352 | cl::opt<bool> EnableVPlanPredication( | ||||
353 | "enable-vplan-predication", cl::init(false), cl::Hidden, | ||||
354 | cl::desc("Enable VPlan-native vectorization path predicator with " | ||||
355 | "support for outer loop vectorization.")); | ||||
356 | |||||
357 | // This flag enables the stress testing of the VPlan H-CFG construction in the | ||||
358 | // VPlan-native vectorization path. It must be used in conjunction with | ||||
359 | // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the | ||||
360 | // verification of the H-CFGs built. | ||||
361 | static cl::opt<bool> VPlanBuildStressTest( | ||||
362 | "vplan-build-stress-test", cl::init(false), cl::Hidden, | ||||
363 | cl::desc( | ||||
364 | "Build VPlan for every supported loop nest in the function and bail " | ||||
365 | "out right after the build (stress test the VPlan H-CFG construction " | ||||
366 | "in the VPlan-native vectorization path).")); | ||||
367 | |||||
368 | cl::opt<bool> llvm::EnableLoopInterleaving( | ||||
369 | "interleave-loops", cl::init(true), cl::Hidden, | ||||
370 | cl::desc("Enable loop interleaving in Loop vectorization passes")); | ||||
371 | cl::opt<bool> llvm::EnableLoopVectorization( | ||||
372 | "vectorize-loops", cl::init(true), cl::Hidden, | ||||
373 | cl::desc("Run the Loop vectorization passes")); | ||||
374 | |||||
375 | cl::opt<bool> PrintVPlansInDotFormat( | ||||
376 | "vplan-print-in-dot-format", cl::init(false), cl::Hidden, | ||||
377 | cl::desc("Use dot format instead of plain text when dumping VPlans")); | ||||
378 | |||||
379 | /// A helper function that returns true if the given type is irregular. The | ||||
380 | /// type is irregular if its allocated size doesn't equal the store size of an | ||||
381 | /// element of the corresponding vector type. | ||||
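/// For example (illustrative): with a typical data layout, i1 has a bit size of
/// 1 but an allocation size of 8 bits, so an array of i1 is not bit-for-bit
/// compatible with a <N x i1> vector and the type is considered irregular here.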
382 | static bool hasIrregularType(Type *Ty, const DataLayout &DL) { | ||||
383 | // Determine if an array of N elements of type Ty is "bitcast compatible" | ||||
384 | // with a <N x Ty> vector. | ||||
385 | // This is only true if there is no padding between the array elements. | ||||
386 | return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); | ||||
387 | } | ||||
388 | |||||
389 | /// A helper function that returns the reciprocal of the block probability of | ||||
390 | /// predicated blocks. If we return X, we are assuming the predicated block | ||||
391 | /// will execute once for every X iterations of the loop header. | ||||
392 | /// | ||||
393 | /// TODO: We should use actual block probability here, if available. Currently, | ||||
394 | /// we always assume predicated blocks have a 50% chance of executing. | ||||
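///
/// For example, with the current value of 2, a predicated block is assumed to
/// execute on every other iteration of the header, so its cost contribution is
/// roughly halved when the expected cost of the loop body is estimated.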
395 | static unsigned getReciprocalPredBlockProb() { return 2; } | ||||
396 | |||||
397 | /// A helper function that returns an integer or floating-point constant with | ||||
398 | /// value C. | ||||
399 | static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { | ||||
400 | return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) | ||||
401 | : ConstantFP::get(Ty, C); | ||||
402 | } | ||||
403 | |||||
404 | /// Returns "best known" trip count for the specified loop \p L as defined by | ||||
405 | /// the following procedure: | ||||
406 | /// 1) Returns exact trip count if it is known. | ||||
407 | /// 2) Returns expected trip count according to profile data if any. | ||||
408 | /// 3) Returns upper bound estimate if it is known. | ||||
409 | /// 4) Returns None if all of the above failed. | ||||
410 | static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) { | ||||
411 | // Check if exact trip count is known. | ||||
412 | if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L)) | ||||
413 | return ExpectedTC; | ||||
414 | |||||
415 | // Check if there is an expected trip count available from profile data. | ||||
416 | if (LoopVectorizeWithBlockFrequency) | ||||
417 | if (auto EstimatedTC = getLoopEstimatedTripCount(L)) | ||||
418 | return EstimatedTC; | ||||
419 | |||||
420 | // Check if upper bound estimate is known. | ||||
421 | if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L)) | ||||
422 | return ExpectedTC; | ||||
423 | |||||
424 | return None; | ||||
425 | } | ||||
426 | |||||
427 | // Forward declare GeneratedRTChecks. | ||||
428 | class GeneratedRTChecks; | ||||
429 | |||||
430 | namespace llvm { | ||||
431 | |||||
432 | /// InnerLoopVectorizer vectorizes loops which contain only one basic | ||||
433 | /// block to a specified vectorization factor (VF). | ||||
434 | /// This class performs the widening of scalars into vectors, or multiple | ||||
435 | /// scalars. This class also implements the following features: | ||||
436 | /// * It inserts an epilogue loop for handling loops that don't have iteration | ||||
437 | /// counts that are known to be a multiple of the vectorization factor. | ||||
438 | /// * It handles the code generation for reduction variables. | ||||
439 | /// * Scalarization (implementation using scalars) of un-vectorizable | ||||
440 | /// instructions. | ||||
441 | /// InnerLoopVectorizer does not perform any vectorization-legality | ||||
442 | /// checks, and relies on the caller to check for the different legality | ||||
443 | /// aspects. The InnerLoopVectorizer relies on the | ||||
444 | /// LoopVectorizationLegality class to provide information about the induction | ||||
445 | /// and reduction variables that were found to a given vectorization factor. | ||||
446 | class InnerLoopVectorizer { | ||||
447 | public: | ||||
448 | InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, | ||||
449 | LoopInfo *LI, DominatorTree *DT, | ||||
450 | const TargetLibraryInfo *TLI, | ||||
451 | const TargetTransformInfo *TTI, AssumptionCache *AC, | ||||
452 | OptimizationRemarkEmitter *ORE, ElementCount VecWidth, | ||||
453 | unsigned UnrollFactor, LoopVectorizationLegality *LVL, | ||||
454 | LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, | ||||
455 | ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks) | ||||
456 | : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), | ||||
457 | AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), | ||||
458 | Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI), | ||||
459 | PSI(PSI), RTChecks(RTChecks) { | ||||
460 | // Query this against the original loop and save it here because the profile | ||||
461 | // of the original loop header may change as the transformation happens. | ||||
462 | OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize( | ||||
463 | OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass); | ||||
464 | } | ||||
465 | |||||
466 | virtual ~InnerLoopVectorizer() = default; | ||||
467 | |||||
468 | /// Create a new empty loop that will contain vectorized instructions later | ||||
469 | /// on, while the old loop will be used as the scalar remainder. Control flow | ||||
470 | /// is generated around the vectorized (and scalar epilogue) loops consisting | ||||
471 | /// of various checks and bypasses. Return the pre-header block of the new | ||||
472 | /// loop. | ||||
473 | /// In the case of epilogue vectorization, this function is overridden to | ||||
474 | /// handle the more complex control flow around the loops. | ||||
475 | virtual BasicBlock *createVectorizedLoopSkeleton(); | ||||
476 | |||||
477 | /// Widen a single instruction within the innermost loop. | ||||
478 | void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands, | ||||
479 | VPTransformState &State); | ||||
480 | |||||
481 | /// Widen a single call instruction within the innermost loop. | ||||
482 | void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands, | ||||
483 | VPTransformState &State); | ||||
484 | |||||
485 | /// Widen a single select instruction within the innermost loop. | ||||
486 | void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands, | ||||
487 | bool InvariantCond, VPTransformState &State); | ||||
488 | |||||
489 | /// Fix the vectorized code, taking care of header phis, live-outs, and more. | ||||
490 | void fixVectorizedLoop(VPTransformState &State); | ||||
491 | |||||
492 | // Return true if any runtime check is added. | ||||
493 | bool areSafetyChecksAdded() { return AddedSafetyChecks; } | ||||
494 | |||||
495 | /// A type for vectorized values in the new loop. Each value from the | ||||
496 | /// original loop, when vectorized, is represented by UF vector values in the | ||||
497 | /// new unrolled loop, where UF is the unroll factor. | ||||
498 | using VectorParts = SmallVector<Value *, 2>; | ||||
499 | |||||
500 | /// Vectorize a single GetElementPtrInst based on information gathered and | ||||
501 | /// decisions taken during planning. | ||||
502 | void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices, | ||||
503 | unsigned UF, ElementCount VF, bool IsPtrLoopInvariant, | ||||
504 | SmallBitVector &IsIndexLoopInvariant, VPTransformState &State); | ||||
505 | |||||
506 | /// Vectorize a single PHINode in a block. This method handles the induction | ||||
507 | /// variable canonicalization. It supports both VF = 1 for unrolled loops and | ||||
508 | /// arbitrary length vectors. | ||||
509 | void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc, | ||||
510 | VPWidenPHIRecipe *PhiR, VPTransformState &State); | ||||
511 | |||||
512 | /// A helper function to scalarize a single Instruction in the innermost loop. | ||||
513 | /// Generates a sequence of scalar instances for each lane between \p MinLane | ||||
514 | /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, | ||||
515 | /// inclusive. Uses the VPValue operands from \p Operands instead of \p | ||||
516 | /// Instr's operands. | ||||
517 | void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands, | ||||
518 | const VPIteration &Instance, bool IfPredicateInstr, | ||||
519 | VPTransformState &State); | ||||
520 | |||||
521 | /// Widen an integer or floating-point induction variable \p IV. If \p Trunc | ||||
522 | /// is provided, the integer induction variable will first be truncated to | ||||
523 | /// the corresponding type. | ||||
524 | void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc, | ||||
525 | VPValue *Def, VPValue *CastDef, | ||||
526 | VPTransformState &State); | ||||
527 | |||||
528 | /// Construct the vector value of a scalarized value \p V one lane at a time. | ||||
529 | void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance, | ||||
530 | VPTransformState &State); | ||||
531 | |||||
532 | /// Try to vectorize interleaved access group \p Group with the base address | ||||
533 | /// given in \p Addr, optionally masking the vector operations if \p | ||||
534 | /// BlockInMask is non-null. Use \p State to translate given VPValues to IR | ||||
535 | /// values in the vectorized loop. | ||||
536 | void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group, | ||||
537 | ArrayRef<VPValue *> VPDefs, | ||||
538 | VPTransformState &State, VPValue *Addr, | ||||
539 | ArrayRef<VPValue *> StoredValues, | ||||
540 | VPValue *BlockInMask = nullptr); | ||||
541 | |||||
542 | /// Vectorize Load and Store instructions with the base address given in \p | ||||
543 | /// Addr, optionally masking the vector operations if \p BlockInMask is | ||||
544 | /// non-null. Use \p State to translate given VPValues to IR values in the | ||||
545 | /// vectorized loop. | ||||
546 | void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State, | ||||
547 | VPValue *Def, VPValue *Addr, | ||||
548 | VPValue *StoredValue, VPValue *BlockInMask); | ||||
549 | |||||
550 | /// Set the debug location in the builder using the debug location in | ||||
551 | /// the instruction. | ||||
552 | void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr); | ||||
553 | |||||
554 | /// Fix the non-induction PHIs in the OrigPHIsToFix vector. | ||||
555 | void fixNonInductionPHIs(VPTransformState &State); | ||||
556 | |||||
557 | /// Returns true if the reordering of FP operations is not allowed, but we are | ||||
558 | /// able to vectorize with strict in-order reductions for the given RdxDesc. | ||||
559 | bool useOrderedReductions(RecurrenceDescriptor &RdxDesc); | ||||
560 | |||||
561 | /// Create a broadcast instruction. This method generates a broadcast | ||||
562 | /// instruction (shuffle) for loop invariant values and for the induction | ||||
563 | /// value. If this is the induction variable then we extend it to N, N+1, ... | ||||
564 | /// this is needed because each iteration in the loop corresponds to a SIMD | ||||
565 | /// element. | ||||
566 | virtual Value *getBroadcastInstrs(Value *V); | ||||
567 | |||||
568 | protected: | ||||
569 | friend class LoopVectorizationPlanner; | ||||
570 | |||||
571 | /// A small list of PHINodes. | ||||
572 | using PhiVector = SmallVector<PHINode *, 4>; | ||||
573 | |||||
574 | /// A type for scalarized values in the new loop. Each value from the | ||||
575 | /// original loop, when scalarized, is represented by UF x VF scalar values | ||||
576 | /// in the new unrolled loop, where UF is the unroll factor and VF is the | ||||
577 | /// vectorization factor. | ||||
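/// For example, with UF = 2 and VF = 4, each scalarized value from the original
/// loop is represented by 2 x 4 = 8 scalar values in the new unrolled loop.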
578 | using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; | ||||
579 | |||||
580 | /// Set up the values of the IVs correctly when exiting the vector loop. | ||||
581 | void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, | ||||
582 | Value *CountRoundDown, Value *EndValue, | ||||
583 | BasicBlock *MiddleBlock); | ||||
584 | |||||
585 | /// Create a new induction variable inside L. | ||||
586 | PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, | ||||
587 | Value *Step, Instruction *DL); | ||||
588 | |||||
589 | /// Handle all cross-iteration phis in the header. | ||||
590 | void fixCrossIterationPHIs(VPTransformState &State); | ||||
591 | |||||
592 | /// Fix a first-order recurrence. This is the second phase of vectorizing | ||||
593 | /// this phi node. | ||||
594 | void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State); | ||||
595 | |||||
596 | /// Fix a reduction cross-iteration phi. This is the second phase of | ||||
597 | /// vectorizing this phi node. | ||||
598 | void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State); | ||||
599 | |||||
600 | /// Clear NSW/NUW flags from reduction instructions if necessary. | ||||
601 | void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, | ||||
602 | VPTransformState &State); | ||||
603 | |||||
604 | /// Fixup the LCSSA phi nodes in the unique exit block. This simply | ||||
605 | /// means we need to add the appropriate incoming value from the middle | ||||
606 | /// block as exiting edges from the scalar epilogue loop (if present) are | ||||
607 | /// already in place, and we exit the vector loop exclusively to the middle | ||||
608 | /// block. | ||||
609 | void fixLCSSAPHIs(VPTransformState &State); | ||||
610 | |||||
611 | /// Iteratively sink the scalarized operands of a predicated instruction into | ||||
612 | /// the block that was created for it. | ||||
613 | void sinkScalarOperands(Instruction *PredInst); | ||||
614 | |||||
615 | /// Shrinks vector element sizes to the smallest bitwidth they can be legally | ||||
616 | /// represented as. | ||||
617 | void truncateToMinimalBitwidths(VPTransformState &State); | ||||
618 | |||||
619 | /// This function adds | ||||
620 | /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) | ||||
621 | /// to each vector element of Val. The sequence starts at StartIdx. | ||||
622 | /// \p Opcode is relevant for FP induction variables. | ||||
623 | virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, | ||||
624 | Instruction::BinaryOps Opcode = | ||||
625 | Instruction::BinaryOpsEnd); | ||||
626 | |||||
627 | /// Compute scalar induction steps. \p ScalarIV is the scalar induction | ||||
628 | /// variable on which to base the steps, \p Step is the size of the step, and | ||||
629 | /// \p EntryVal is the value from the original loop that maps to the steps. | ||||
630 | /// Note that \p EntryVal doesn't have to be an induction variable - it | ||||
631 | /// can also be a truncate instruction. | ||||
632 | void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, | ||||
633 | const InductionDescriptor &ID, VPValue *Def, | ||||
634 | VPValue *CastDef, VPTransformState &State); | ||||
635 | |||||
636 | /// Create a vector induction phi node based on an existing scalar one. \p | ||||
637 | /// EntryVal is the value from the original loop that maps to the vector phi | ||||
638 | /// node, and \p Step is the loop-invariant step. If \p EntryVal is a | ||||
639 | /// truncate instruction, instead of widening the original IV, we widen a | ||||
640 | /// version of the IV truncated to \p EntryVal's type. | ||||
641 | void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, | ||||
642 | Value *Step, Value *Start, | ||||
643 | Instruction *EntryVal, VPValue *Def, | ||||
644 | VPValue *CastDef, | ||||
645 | VPTransformState &State); | ||||
646 | |||||
647 | /// Returns true if an instruction \p I should be scalarized instead of | ||||
648 | /// vectorized for the chosen vectorization factor. | ||||
649 | bool shouldScalarizeInstruction(Instruction *I) const; | ||||
650 | |||||
651 | /// Returns true if we should generate a scalar version of \p IV. | ||||
652 | bool needsScalarInduction(Instruction *IV) const; | ||||
653 | |||||
654 | /// If there is a cast involved in the induction variable \p ID, which should | ||||
655 | /// be ignored in the vectorized loop body, this function records the | ||||
656 | /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the | ||||
657 | /// cast. We had already proved that the casted Phi is equal to the uncasted | ||||
658 | /// Phi in the vectorized loop (under a runtime guard), and therefore | ||||
659 | /// there is no need to vectorize the cast - the same value can be used in the | ||||
660 | /// vector loop for both the Phi and the cast. | ||||
661 | /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified. | ||||
662 | /// Otherwise, \p VectorLoopValue is a widened/vectorized value. | ||||
663 | /// | ||||
664 | /// \p EntryVal is the value from the original loop that maps to the vector | ||||
665 | /// phi node and is used to distinguish what is the IV currently being | ||||
666 | /// processed - original one (if \p EntryVal is a phi corresponding to the | ||||
667 | /// original IV) or the "newly-created" one based on the proof mentioned above | ||||
668 | /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the | ||||
669 | /// latter case \p EntryVal is a TruncInst and we must not record anything for | ||||
670 | /// that IV, but it's error-prone to expect callers of this routine to care | ||||
671 | /// about that, hence this explicit parameter. | ||||
672 | void recordVectorLoopValueForInductionCast( | ||||
673 | const InductionDescriptor &ID, const Instruction *EntryVal, | ||||
674 | Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State, | ||||
675 | unsigned Part, unsigned Lane = UINT_MAX); | ||||
676 | |||||
677 | /// Generate a shuffle sequence that will reverse the vector Vec. | ||||
678 | virtual Value *reverseVector(Value *Vec); | ||||
679 | |||||
680 | /// Returns (and creates if needed) the original loop trip count. | ||||
681 | Value *getOrCreateTripCount(Loop *NewLoop); | ||||
682 | |||||
683 | /// Returns (and creates if needed) the trip count of the widened loop. | ||||
684 | Value *getOrCreateVectorTripCount(Loop *NewLoop); | ||||
685 | |||||
686 | /// Returns a bitcasted value to the requested vector type. | ||||
687 | /// Also handles bitcasts of vector<float> <-> vector<pointer> types. | ||||
688 | Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, | ||||
689 | const DataLayout &DL); | ||||
690 | |||||
691 | /// Emit a bypass check to see if the vector trip count is zero, including if | ||||
692 | /// it overflows. | ||||
693 | void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); | ||||
694 | |||||
695 | /// Emit a bypass check to see if all of the SCEV assumptions we've | ||||
696 | /// had to make are correct. Returns the block containing the checks or | ||||
697 | /// nullptr if no checks have been added. | ||||
698 | BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass); | ||||
699 | |||||
700 | /// Emit bypass checks to check any memory assumptions we may have made. | ||||
701 | /// Returns the block containing the checks or nullptr if no checks have been | ||||
702 | /// added. | ||||
703 | BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); | ||||
704 | |||||
705 | /// Compute the transformed value of Index at offset StartValue using step | ||||
706 | /// StepValue. | ||||
707 | /// For integer induction, returns StartValue + Index * StepValue. | ||||
708 | /// For pointer induction, returns StartValue[Index * StepValue]. | ||||
709 | /// FIXME: The newly created binary instructions should contain nsw/nuw | ||||
710 | /// flags, which can be found from the original scalar operations. | ||||
711 | Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, | ||||
712 | const DataLayout &DL, | ||||
713 | const InductionDescriptor &ID) const; | ||||
714 | |||||
715 | /// Emit basic blocks (prefixed with \p Prefix) for the iteration check, | ||||
716 | /// vector loop preheader, middle block and scalar preheader. Also | ||||
717 | /// allocate a loop object for the new vector loop and return it. | ||||
718 | Loop *createVectorLoopSkeleton(StringRef Prefix); | ||||
719 | |||||
720 | /// Create new phi nodes for the induction variables to resume iteration count | ||||
721 | /// in the scalar epilogue, from where the vectorized loop left off (given by | ||||
722 | /// \p VectorTripCount). | ||||
723 | /// In cases where the loop skeleton is more complicated (eg. epilogue | ||||
724 | /// vectorization) and the resume values can come from an additional bypass | ||||
725 | /// block, the \p AdditionalBypass pair provides information about the bypass | ||||
726 | /// block and the end value on the edge from bypass to this loop. | ||||
727 | void createInductionResumeValues( | ||||
728 | Loop *L, Value *VectorTripCount, | ||||
729 | std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr}); | ||||
730 | |||||
731 | /// Complete the loop skeleton by adding debug MDs, creating appropriate | ||||
732 | /// conditional branches in the middle block, preparing the builder and | ||||
733 | /// running the verifier. Take in the vector loop \p L as argument, and return | ||||
734 | /// the preheader of the completed vector loop. | ||||
735 | BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID); | ||||
736 | |||||
737 | /// Add additional metadata to \p To that was not present on \p Orig. | ||||
738 | /// | ||||
739 | /// Currently this is used to add the noalias annotations based on the | ||||
740 | /// inserted memchecks. Use this for instructions that are *cloned* into the | ||||
741 | /// vector loop. | ||||
742 | void addNewMetadata(Instruction *To, const Instruction *Orig); | ||||
743 | |||||
744 | /// Add metadata from one instruction to another. | ||||
745 | /// | ||||
746 | /// This includes both the original MDs from \p From and additional ones (\see | ||||
747 | /// addNewMetadata). Use this for *newly created* instructions in the vector | ||||
748 | /// loop. | ||||
749 | void addMetadata(Instruction *To, Instruction *From); | ||||
750 | |||||
751 | /// Similar to the previous function but it adds the metadata to a | ||||
752 | /// vector of instructions. | ||||
753 | void addMetadata(ArrayRef<Value *> To, Instruction *From); | ||||
754 | |||||
755 | /// Allow subclasses to override and print debug traces before/after vplan | ||||
756 | /// execution, when trace information is requested. | ||||
757 | virtual void printDebugTracesAtStart(){}; | ||||
758 | virtual void printDebugTracesAtEnd(){}; | ||||
759 | |||||
760 | /// The original loop. | ||||
761 | Loop *OrigLoop; | ||||
762 | |||||
763 | /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies | ||||
764 | /// dynamic knowledge to simplify SCEV expressions and converts them to a | ||||
765 | /// more usable form. | ||||
766 | PredicatedScalarEvolution &PSE; | ||||
767 | |||||
768 | /// Loop Info. | ||||
769 | LoopInfo *LI; | ||||
770 | |||||
771 | /// Dominator Tree. | ||||
772 | DominatorTree *DT; | ||||
773 | |||||
774 | /// Alias Analysis. | ||||
775 | AAResults *AA; | ||||
776 | |||||
777 | /// Target Library Info. | ||||
778 | const TargetLibraryInfo *TLI; | ||||
779 | |||||
780 | /// Target Transform Info. | ||||
781 | const TargetTransformInfo *TTI; | ||||
782 | |||||
783 | /// Assumption Cache. | ||||
784 | AssumptionCache *AC; | ||||
785 | |||||
786 | /// Interface to emit optimization remarks. | ||||
787 | OptimizationRemarkEmitter *ORE; | ||||
788 | |||||
789 | /// LoopVersioning. It's only set up (non-null) if memchecks were | ||||
790 | /// used. | ||||
791 | /// | ||||
792 | /// This is currently only used to add no-alias metadata based on the | ||||
793 | /// memchecks. The actual versioning is performed manually. | ||||
794 | std::unique_ptr<LoopVersioning> LVer; | ||||
795 | |||||
796 | /// The vectorization SIMD factor to use. Each vector will have this many | ||||
797 | /// vector elements. | ||||
798 | ElementCount VF; | ||||
799 | |||||
800 | /// The vectorization unroll factor to use. Each scalar is vectorized to this | ||||
801 | /// many different vector instructions. | ||||
802 | unsigned UF; | ||||
803 | |||||
804 | /// The builder that we use | ||||
805 | IRBuilder<> Builder; | ||||
806 | |||||
807 | // --- Vectorization state --- | ||||
808 | |||||
809 | /// The vector-loop preheader. | ||||
810 | BasicBlock *LoopVectorPreHeader; | ||||
811 | |||||
812 | /// The scalar-loop preheader. | ||||
813 | BasicBlock *LoopScalarPreHeader; | ||||
814 | |||||
815 | /// Middle Block between the vector and the scalar. | ||||
816 | BasicBlock *LoopMiddleBlock; | ||||
817 | |||||
818 | /// The (unique) ExitBlock of the scalar loop. Note that | ||||
819 | /// there can be multiple exiting edges reaching this block. | ||||
820 | BasicBlock *LoopExitBlock; | ||||
821 | |||||
822 | /// The vector loop body. | ||||
823 | BasicBlock *LoopVectorBody; | ||||
824 | |||||
825 | /// The scalar loop body. | ||||
826 | BasicBlock *LoopScalarBody; | ||||
827 | |||||
828 | /// A list of all bypass blocks. The first block is the entry of the loop. | ||||
829 | SmallVector<BasicBlock *, 4> LoopBypassBlocks; | ||||
830 | |||||
831 | /// The new Induction variable which was added to the new block. | ||||
832 | PHINode *Induction = nullptr; | ||||
833 | |||||
834 | /// The induction variable of the old basic block. | ||||
835 | PHINode *OldInduction = nullptr; | ||||
836 | |||||
837 | /// Store instructions that were predicated. | ||||
838 | SmallVector<Instruction *, 4> PredicatedInstructions; | ||||
839 | |||||
840 | /// Trip count of the original loop. | ||||
841 | Value *TripCount = nullptr; | ||||
842 | |||||
843 | /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) | ||||
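/// For example, with TripCount = 103, VF = 4 and UF = 2, this is
/// 103 - (103 % 8) = 96, i.e. the vector body covers 96 of the original
/// iterations, 8 per vector iteration.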
844 | Value *VectorTripCount = nullptr; | ||||
845 | |||||
846 | /// The legality analysis. | ||||
847 | LoopVectorizationLegality *Legal; | ||||
848 | |||||
849 | /// The profitability analysis. | ||||
850 | LoopVectorizationCostModel *Cost; | ||||
851 | |||||
852 | // Record whether runtime checks are added. | ||||
853 | bool AddedSafetyChecks = false; | ||||
854 | |||||
855 | // Holds the end values for each induction variable. We save the end values | ||||
856 | // so we can later fix-up the external users of the induction variables. | ||||
857 | DenseMap<PHINode *, Value *> IVEndValues; | ||||
858 | |||||
859 | // Vector of original scalar PHIs whose corresponding widened PHIs need to be | ||||
860 | // fixed up at the end of vector code generation. | ||||
861 | SmallVector<PHINode *, 8> OrigPHIsToFix; | ||||
862 | |||||
863 | /// BFI and PSI are used to check for profile guided size optimizations. | ||||
864 | BlockFrequencyInfo *BFI; | ||||
865 | ProfileSummaryInfo *PSI; | ||||
866 | |||||
867 | // Whether this loop should be optimized for size based on profile-guided size | ||||
868 | // optimizations. | ||||
869 | bool OptForSizeBasedOnProfile; | ||||
870 | |||||
871 | /// Structure to hold information about generated runtime checks, responsible | ||||
872 | /// for cleaning up the checks if vectorization turns out to be unprofitable. | ||||
873 | GeneratedRTChecks &RTChecks; | ||||
874 | }; | ||||
875 | |||||
876 | class InnerLoopUnroller : public InnerLoopVectorizer { | ||||
877 | public: | ||||
878 | InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, | ||||
879 | LoopInfo *LI, DominatorTree *DT, | ||||
880 | const TargetLibraryInfo *TLI, | ||||
881 | const TargetTransformInfo *TTI, AssumptionCache *AC, | ||||
882 | OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, | ||||
883 | LoopVectorizationLegality *LVL, | ||||
884 | LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, | ||||
885 | ProfileSummaryInfo *PSI, GeneratedRTChecks &Check) | ||||
886 | : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | ||||
887 | ElementCount::getFixed(1), UnrollFactor, LVL, CM, | ||||
888 | BFI, PSI, Check) {} | ||||
889 | |||||
890 | private: | ||||
891 | Value *getBroadcastInstrs(Value *V) override; | ||||
892 | Value *getStepVector(Value *Val, int StartIdx, Value *Step, | ||||
893 | Instruction::BinaryOps Opcode = | ||||
894 | Instruction::BinaryOpsEnd) override; | ||||
895 | Value *reverseVector(Value *Vec) override; | ||||
896 | }; | ||||
897 | |||||
898 | /// Encapsulate information regarding vectorization of a loop and its epilogue. | ||||
899 | /// This information is meant to be updated and used across two stages of | ||||
900 | /// epilogue vectorization. | ||||
901 | struct EpilogueLoopVectorizationInfo { | ||||
902 | ElementCount MainLoopVF = ElementCount::getFixed(0); | ||||
903 | unsigned MainLoopUF = 0; | ||||
904 | ElementCount EpilogueVF = ElementCount::getFixed(0); | ||||
905 | unsigned EpilogueUF = 0; | ||||
906 | BasicBlock *MainLoopIterationCountCheck = nullptr; | ||||
907 | BasicBlock *EpilogueIterationCountCheck = nullptr; | ||||
908 | BasicBlock *SCEVSafetyCheck = nullptr; | ||||
909 | BasicBlock *MemSafetyCheck = nullptr; | ||||
910 | Value *TripCount = nullptr; | ||||
911 | Value *VectorTripCount = nullptr; | ||||
912 | |||||
913 | EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF, | ||||
914 | unsigned EUF) | ||||
915 | : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF), | ||||
916 | EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) { | ||||
917 | assert(EUF == 1 && | ||||
918 | "A high UF for the epilogue loop is likely not beneficial."); | ||||
919 | } | ||||
920 | }; | ||||
921 | |||||
922 | /// An extension of the inner loop vectorizer that creates a skeleton for a | ||||
923 | /// vectorized loop that has its epilogue (residual) also vectorized. | ||||
924 | /// The idea is to run the vplan on a given loop twice, first to set up the | ||||
925 | /// skeleton and vectorize the main loop, and second to complete the skeleton | ||||
926 | /// from the first step and vectorize the epilogue. This is achieved by | ||||
927 | /// deriving two concrete strategy classes from this base class and invoking | ||||
928 | /// them in succession from the loop vectorizer planner. | ||||
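///
/// A rough sketch of the generated structure (simplified): an iteration-count
/// check for the main vector loop, the SCEV and memory runtime checks, the main
/// vector loop, an iteration-count check for the vector epilogue, the vector
/// epilogue loop, and finally the scalar remainder loop. The fields of
/// EpilogueLoopVectorizationInfo above record the blocks and trip counts shared
/// between the two passes.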
929 | class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer { | ||||
930 | public: | ||||
931 | InnerLoopAndEpilogueVectorizer( | ||||
932 | Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, | ||||
933 | DominatorTree *DT, const TargetLibraryInfo *TLI, | ||||
934 | const TargetTransformInfo *TTI, AssumptionCache *AC, | ||||
935 | OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, | ||||
936 | LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, | ||||
937 | BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, | ||||
938 | GeneratedRTChecks &Checks) | ||||
939 | : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | ||||
940 | EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI, | ||||
941 | Checks), | ||||
942 | EPI(EPI) {} | ||||
943 | |||||
944 | // Override this function to handle the more complex control flow around the | ||||
945 | // three loops. | ||||
946 | BasicBlock *createVectorizedLoopSkeleton() final override { | ||||
947 | return createEpilogueVectorizedLoopSkeleton(); | ||||
948 | } | ||||
949 | |||||
950 | /// The interface for creating a vectorized skeleton using one of two | ||||
951 | /// different strategies, each corresponding to one execution of the vplan | ||||
952 | /// as described above. | ||||
953 | virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0; | ||||
954 | |||||
955 | /// Holds and updates state information required to vectorize the main loop | ||||
956 | /// and its epilogue in two separate passes. This setup helps us avoid | ||||
957 | /// regenerating and recomputing runtime safety checks. It also helps us to | ||||
958 | /// shorten the iteration-count-check path length for the cases where the | ||||
959 | /// iteration count of the loop is so small that the main vector loop is | ||||
960 | /// completely skipped. | ||||
961 | EpilogueLoopVectorizationInfo &EPI; | ||||
962 | }; | ||||
963 | |||||
964 | /// A specialized derived class of inner loop vectorizer that performs | ||||
965 | /// vectorization of *main* loops in the process of vectorizing loops and their | ||||
966 | /// epilogues. | ||||
967 | class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer { | ||||
968 | public: | ||||
969 | EpilogueVectorizerMainLoop( | ||||
970 | Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, | ||||
971 | DominatorTree *DT, const TargetLibraryInfo *TLI, | ||||
972 | const TargetTransformInfo *TTI, AssumptionCache *AC, | ||||
973 | OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, | ||||
974 | LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, | ||||
975 | BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, | ||||
976 | GeneratedRTChecks &Check) | ||||
977 | : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | ||||
978 | EPI, LVL, CM, BFI, PSI, Check) {} | ||||
979 | /// Implements the interface for creating a vectorized skeleton using the | ||||
980 | /// *main loop* strategy (i.e., the first pass of vplan execution). | ||||
981 | BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; | ||||
982 | |||||
983 | protected: | ||||
984 | /// Emits an iteration count bypass check once for the main loop (when \p | ||||
985 | /// ForEpilogue is false) and once for the epilogue loop (when \p | ||||
986 | /// ForEpilogue is true). | ||||
987 | BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass, | ||||
988 | bool ForEpilogue); | ||||
989 | void printDebugTracesAtStart() override; | ||||
990 | void printDebugTracesAtEnd() override; | ||||
991 | }; | ||||
992 | |||||
993 | // A specialized derived class of inner loop vectorizer that performs | ||||
994 | // vectorization of *epilogue* loops in the process of vectorizing loops and | ||||
995 | // their epilogues. | ||||
996 | class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer { | ||||
997 | public: | ||||
998 | EpilogueVectorizerEpilogueLoop( | ||||
999 | Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, | ||||
1000 | DominatorTree *DT, const TargetLibraryInfo *TLI, | ||||
1001 | const TargetTransformInfo *TTI, AssumptionCache *AC, | ||||
1002 | OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, | ||||
1003 | LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, | ||||
1004 | BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, | ||||
1005 | GeneratedRTChecks &Checks) | ||||
1006 | : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, | ||||
1007 | EPI, LVL, CM, BFI, PSI, Checks) {} | ||||
1008 | /// Implements the interface for creating a vectorized skeleton using the | ||||
1009 | /// *epilogue loop* strategy (i.e. the second pass of vplan execution). | ||||
1010 | BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; | ||||
1011 | |||||
1012 | protected: | ||||
1013 | /// Emits an iteration count bypass check after the main vector loop has | ||||
1014 | /// finished to see if there are any iterations left to execute by either | ||||
1015 | /// the vector epilogue or the scalar epilogue. | ||||
1016 | BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L, | ||||
1017 | BasicBlock *Bypass, | ||||
1018 | BasicBlock *Insert); | ||||
1019 | void printDebugTracesAtStart() override; | ||||
1020 | void printDebugTracesAtEnd() override; | ||||
1021 | }; | ||||
1022 | } // end namespace llvm | ||||
1023 | |||||
1024 | /// Look for a meaningful debug location on the instruction or its | ||||
1025 | /// operands. | ||||
1026 | static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { | ||||
1027 | if (!I) | ||||
1028 | return I; | ||||
1029 | |||||
1030 | DebugLoc Empty; | ||||
1031 | if (I->getDebugLoc() != Empty) | ||||
1032 | return I; | ||||
1033 | |||||
1034 | for (Use &Op : I->operands()) { | ||||
1035 | if (Instruction *OpInst = dyn_cast<Instruction>(Op)) | ||||
1036 | if (OpInst->getDebugLoc() != Empty) | ||||
1037 | return OpInst; | ||||
1038 | } | ||||
1039 | |||||
1040 | return I; | ||||
1041 | } | ||||
1042 | |||||
1043 | void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) { | ||||
1044 | if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) { | ||||
1045 | const DILocation *DIL = Inst->getDebugLoc(); | ||||
1046 | |||||
1047 | // When a FSDiscriminator is enabled, we don't need to add the multiply | ||||
1048 | // factors to the discriminators. | ||||
1049 | if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && | ||||
1050 | !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) { | ||||
1051 | // FIXME: For scalable vectors, assume vscale=1. | ||||
1052 | auto NewDIL = | ||||
1053 | DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue()); | ||||
1054 | if (NewDIL) | ||||
1055 | B.SetCurrentDebugLocation(NewDIL.getValue()); | ||||
1056 | else | ||||
1057 | LLVM_DEBUG(dbgs() | ||||
1058 | << "Failed to create new discriminator: " | ||||
1059 | << DIL->getFilename() << " Line: " << DIL->getLine()); | ||||
1060 | } else | ||||
1061 | B.SetCurrentDebugLocation(DIL); | ||||
1062 | } else | ||||
1063 | B.SetCurrentDebugLocation(DebugLoc()); | ||||
1064 | } | ||||
1065 | |||||
1066 | /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I | ||||
1067 | /// is passed, the message relates to that particular instruction. | ||||
1068 | #ifndef NDEBUG | ||||
1069 | static void debugVectorizationMessage(const StringRef Prefix, | ||||
1070 | const StringRef DebugMsg, | ||||
1071 | Instruction *I) { | ||||
1072 | dbgs() << "LV: " << Prefix << DebugMsg; | ||||
1073 | if (I != nullptr) | ||||
1074 | dbgs() << " " << *I; | ||||
1075 | else | ||||
1076 | dbgs() << '.'; | ||||
1077 | dbgs() << '\n'; | ||||
1078 | } | ||||
1079 | #endif | ||||
1080 | |||||
1081 | /// Create an analysis remark that explains why vectorization failed | ||||
1082 | /// | ||||
1083 | /// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p | ||||
1084 | /// RemarkName is the identifier for the remark. If \p I is passed it is an | ||||
1085 | /// instruction that prevents vectorization. Otherwise \p TheLoop is used for | ||||
1086 | /// the location of the remark. \return the remark object that can be | ||||
1087 | /// streamed to. | ||||
1088 | static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, | ||||
1089 | StringRef RemarkName, Loop *TheLoop, Instruction *I) { | ||||
1090 | Value *CodeRegion = TheLoop->getHeader(); | ||||
1091 | DebugLoc DL = TheLoop->getStartLoc(); | ||||
1092 | |||||
1093 | if (I) { | ||||
1094 | CodeRegion = I->getParent(); | ||||
1095 | // If there is no debug location attached to the instruction, fall back to | ||||
1096 | // using the loop's. | ||||
1097 | if (I->getDebugLoc()) | ||||
1098 | DL = I->getDebugLoc(); | ||||
1099 | } | ||||
1100 | |||||
1101 | return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion); | ||||
1102 | } | ||||
1103 | |||||
1104 | /// Return a value for Step multiplied by VF. | ||||
1105 | static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) { | ||||
1106 | assert(isa<ConstantInt>(Step) && "Expected an integer step"); | ||||
1107 | Constant *StepVal = ConstantInt::get( | ||||
1108 | Step->getType(), | ||||
1109 | cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue()); | ||||
1110 | return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal; | ||||
1111 | } | ||||
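| // Editorial note, not part of the original source: a minimal illustration of | ||||
| // what this helper produces. With Step = 2 and a fixed VF of 4 the result is | ||||
| // the plain constant 8; with a scalable VF of <vscale x 4> the same constant | ||||
| // is scaled by vscale at runtime via B.CreateVScale(StepVal). | ||||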
1112 | |||||
1113 | namespace llvm { | ||||
1114 | |||||
1115 | /// Return the runtime value for VF. | ||||
1116 | Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) { | ||||
1117 | Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue()); | ||||
1118 | return VF.isScalable() ? B.CreateVScale(EC) : EC; | ||||
1119 | } | ||||
1120 | |||||
1121 | void reportVectorizationFailure(const StringRef DebugMsg, | ||||
1122 | const StringRef OREMsg, const StringRef ORETag, | ||||
1123 | OptimizationRemarkEmitter *ORE, Loop *TheLoop, | ||||
1124 | Instruction *I) { | ||||
1125 | LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { debugVectorizationMessage("Not vectorizing: " , DebugMsg, I); } } while (false); | ||||
1126 | LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); | ||||
1127 | ORE->emit( | ||||
1128 | createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) | ||||
1129 | << "loop not vectorized: " << OREMsg); | ||||
1130 | } | ||||
1131 | |||||
1132 | void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, | ||||
1133 | OptimizationRemarkEmitter *ORE, Loop *TheLoop, | ||||
1134 | Instruction *I) { | ||||
1135 | LLVM_DEBUG(debugVectorizationMessage("", Msg, I))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { debugVectorizationMessage("", Msg, I); } } while (false); | ||||
1136 | LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); | ||||
1137 | ORE->emit( | ||||
1138 | createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) | ||||
1139 | << Msg); | ||||
1140 | } | ||||
1141 | |||||
1142 | } // end namespace llvm | ||||
1143 | |||||
1144 | #ifndef NDEBUG | ||||
1145 | /// \return string containing a file name and a line # for the given loop. | ||||
1146 | static std::string getDebugLocString(const Loop *L) { | ||||
1147 | std::string Result; | ||||
1148 | if (L) { | ||||
1149 | raw_string_ostream OS(Result); | ||||
1150 | if (const DebugLoc LoopDbgLoc = L->getStartLoc()) | ||||
1151 | LoopDbgLoc.print(OS); | ||||
1152 | else | ||||
1153 | // Just print the module name. | ||||
1154 | OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); | ||||
1155 | OS.flush(); | ||||
1156 | } | ||||
1157 | return Result; | ||||
1158 | } | ||||
1159 | #endif | ||||
1160 | |||||
1161 | void InnerLoopVectorizer::addNewMetadata(Instruction *To, | ||||
1162 | const Instruction *Orig) { | ||||
1163 | // If the loop was versioned with memchecks, add the corresponding no-alias | ||||
1164 | // metadata. | ||||
1165 | if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) | ||||
1166 | LVer->annotateInstWithNoAlias(To, Orig); | ||||
1167 | } | ||||
1168 | |||||
1169 | void InnerLoopVectorizer::addMetadata(Instruction *To, | ||||
1170 | Instruction *From) { | ||||
1171 | propagateMetadata(To, From); | ||||
1172 | addNewMetadata(To, From); | ||||
1173 | } | ||||
1174 | |||||
1175 | void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, | ||||
1176 | Instruction *From) { | ||||
1177 | for (Value *V : To) { | ||||
1178 | if (Instruction *I = dyn_cast<Instruction>(V)) | ||||
1179 | addMetadata(I, From); | ||||
1180 | } | ||||
1181 | } | ||||
1182 | |||||
1183 | namespace llvm { | ||||
1184 | |||||
1185 | // Hints for the loop vectorization cost model on how the scalar epilogue | ||||
1186 | // loop should be lowered. | ||||
1187 | enum ScalarEpilogueLowering { | ||||
1188 | |||||
1189 | // The default: allowing scalar epilogues. | ||||
1190 | CM_ScalarEpilogueAllowed, | ||||
1191 | |||||
1192 | // Vectorization with OptForSize: don't allow epilogues. | ||||
1193 | CM_ScalarEpilogueNotAllowedOptSize, | ||||
1194 | |||||
1195 | // A special case of vectorization with OptForSize: loops with a very small | ||||
1196 | // trip count are considered for vectorization under OptForSize, thereby | ||||
1197 | // making sure the cost of their loop body is dominant, free of runtime | ||||
1198 | // guards and scalar iteration overheads. | ||||
1199 | CM_ScalarEpilogueNotAllowedLowTripLoop, | ||||
1200 | |||||
1201 | // Loop hint predicate indicating an epilogue is undesired. | ||||
1202 | CM_ScalarEpilogueNotNeededUsePredicate, | ||||
1203 | |||||
1204 | // Directive indicating we must either tail fold or not vectorize. | ||||
1205 | CM_ScalarEpilogueNotAllowedUsePredicate | ||||
1206 | }; | ||||
1207 | |||||
1208 | /// ElementCountComparator creates a total ordering for ElementCount | ||||
1209 | /// for the purposes of using it in a set structure. | ||||
1210 | struct ElementCountComparator { | ||||
1211 | bool operator()(const ElementCount &LHS, const ElementCount &RHS) const { | ||||
1212 | return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) < | ||||
1213 | std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue()); | ||||
1214 | } | ||||
1215 | }; | ||||
1216 | using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>; | ||||
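| // Editorial note, not part of the original source: because the tuple compares | ||||
| // isScalable() first (false < true), all fixed VFs order before all scalable | ||||
| // ones, and ties are broken by the known minimum element count, e.g. | ||||
| //   1 < 2 < 4 < vscale x 1 < vscale x 2 < vscale x 4 | ||||
| // which is the total ordering required for storing them in a set. | ||||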
1217 | |||||
1218 | /// LoopVectorizationCostModel - estimates the expected speedups due to | ||||
1219 | /// vectorization. | ||||
1220 | /// In many cases vectorization is not profitable. This can happen because of | ||||
1221 | /// a number of reasons. In this class we mainly attempt to predict the | ||||
1222 | /// expected speedup/slowdowns due to the supported instruction set. We use the | ||||
1223 | /// TargetTransformInfo to query the different backends for the cost of | ||||
1224 | /// different operations. | ||||
1225 | class LoopVectorizationCostModel { | ||||
1226 | public: | ||||
1227 | LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, | ||||
1228 | PredicatedScalarEvolution &PSE, LoopInfo *LI, | ||||
1229 | LoopVectorizationLegality *Legal, | ||||
1230 | const TargetTransformInfo &TTI, | ||||
1231 | const TargetLibraryInfo *TLI, DemandedBits *DB, | ||||
1232 | AssumptionCache *AC, | ||||
1233 | OptimizationRemarkEmitter *ORE, const Function *F, | ||||
1234 | const LoopVectorizeHints *Hints, | ||||
1235 | InterleavedAccessInfo &IAI) | ||||
1236 | : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), | ||||
1237 | TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F), | ||||
1238 | Hints(Hints), InterleaveInfo(IAI) {} | ||||
1239 | |||||
1240 | /// \return An upper bound for the vectorization factors (both fixed and | ||||
1241 | /// scalable). If the factors are 0, vectorization and interleaving should be | ||||
1242 | /// avoided up front. | ||||
1243 | FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC); | ||||
1244 | |||||
1245 | /// \return True if runtime checks are required for vectorization, and false | ||||
1246 | /// otherwise. | ||||
1247 | bool runtimeChecksRequired(); | ||||
1248 | |||||
1249 | /// \return The most profitable vectorization factor and the cost of that VF. | ||||
1250 | /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO | ||||
1251 | /// then this vectorization factor will be selected if vectorization is | ||||
1252 | /// possible. | ||||
1253 | VectorizationFactor | ||||
1254 | selectVectorizationFactor(const ElementCountSet &CandidateVFs); | ||||
1255 | |||||
1256 | VectorizationFactor | ||||
1257 | selectEpilogueVectorizationFactor(const ElementCount MaxVF, | ||||
1258 | const LoopVectorizationPlanner &LVP); | ||||
1259 | |||||
1260 | /// Setup cost-based decisions for user vectorization factor. | ||||
1261 | void selectUserVectorizationFactor(ElementCount UserVF) { | ||||
1262 | collectUniformsAndScalars(UserVF); | ||||
1263 | collectInstsToScalarize(UserVF); | ||||
1264 | } | ||||
1265 | |||||
1266 | /// \return The size (in bits) of the smallest and widest types in the code | ||||
1267 | /// that needs to be vectorized. We ignore values that remain scalar such as | ||||
1268 | /// 64 bit loop indices. | ||||
1269 | std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); | ||||
1270 | |||||
1271 | /// \return The desired interleave count. | ||||
1272 | /// If interleave count has been specified by metadata it will be returned. | ||||
1273 | /// Otherwise, the interleave count is computed and returned. VF and LoopCost | ||||
1274 | /// are the selected vectorization factor and the cost of the selected VF. | ||||
1275 | unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); | ||||
1276 | |||||
1277 | /// A memory access instruction may be vectorized in more than one way. | ||||
1278 | /// The form of the instruction after vectorization depends on its cost. | ||||
1279 | /// This function takes cost-based decisions for Load/Store instructions | ||||
1280 | /// and collects them in a map. This decision map is used for building | ||||
1281 | /// the lists of loop-uniform and loop-scalar instructions. | ||||
1282 | /// The calculated cost is saved with the widening decision in order to | ||||
1283 | /// avoid redundant calculations. | ||||
1284 | void setCostBasedWideningDecision(ElementCount VF); | ||||
1285 | |||||
1286 | /// A struct that represents some properties of the register usage | ||||
1287 | /// of a loop. | ||||
1288 | struct RegisterUsage { | ||||
1289 | /// Holds the number of loop invariant values that are used in the loop. | ||||
1290 | /// The key is ClassID of target-provided register class. | ||||
1291 | SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; | ||||
1292 | /// Holds the maximum number of concurrent live intervals in the loop. | ||||
1293 | /// The key is ClassID of target-provided register class. | ||||
1294 | SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; | ||||
1295 | }; | ||||
1296 | |||||
1297 | /// \return Returns information about the register usages of the loop for the | ||||
1298 | /// given vectorization factors. | ||||
1299 | SmallVector<RegisterUsage, 8> | ||||
1300 | calculateRegisterUsage(ArrayRef<ElementCount> VFs); | ||||
1301 | |||||
1302 | /// Collect values we want to ignore in the cost model. | ||||
1303 | void collectValuesToIgnore(); | ||||
1304 | |||||
1305 | /// Split reductions into those that happen in the loop, and those that happen | ||||
1306 | /// outside. In-loop reductions are collected into InLoopReductionChains. | ||||
1307 | void collectInLoopReductions(); | ||||
1308 | |||||
1309 | /// Returns true if we should use strict in-order reductions for the given | ||||
1310 | /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, | ||||
1311 | /// the IsOrdered flag of RdxDesc is set and we do not allow reordering | ||||
1312 | /// of FP operations. | ||||
1313 | bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { | ||||
1314 | return EnableStrictReductions && !Hints->allowReordering() && | ||||
1315 | RdxDesc.isOrdered(); | ||||
1316 | } | ||||
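| // Editorial note, not part of the original source: the typical case is a | ||||
| // floating-point accumulation such as `sum += a[i]` compiled without | ||||
| // fast-math. When this returns true the reduction must be emitted as a | ||||
| // strict, in-order reduction rather than as reassociated partial sums, so | ||||
| // the vectorized result matches the scalar loop bit-for-bit. | ||||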
1317 | |||||
1318 | /// \returns The smallest bitwidth each instruction can be represented with. | ||||
1319 | /// The vector equivalents of these instructions should be truncated to this | ||||
1320 | /// type. | ||||
1321 | const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { | ||||
1322 | return MinBWs; | ||||
1323 | } | ||||
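| // Editorial note, not part of the original source: an entry in MinBWs arises | ||||
| // when an integer value is computed in a wide type but only its low bits | ||||
| // matter, e.g. `(a[i] & 0xff) + 1` stored to an i8 array; the vector form | ||||
| // can then use <VF x i8> instead of <VF x i32>, reducing register pressure. | ||||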
1324 | |||||
1325 | /// \returns True if it is more profitable to scalarize instruction \p I for | ||||
1326 | /// vectorization factor \p VF. | ||||
1327 | bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { | ||||
1328 | assert(VF.isVector() && | ||||
1329 | "Profitable to scalarize relevant only for VF > 1."); | ||||
1330 | |||||
1331 | // Cost model is not run in the VPlan-native path - return conservative | ||||
1332 | // result until this changes. | ||||
1333 | if (EnableVPlanNativePath) | ||||
1334 | return false; | ||||
1335 | |||||
1336 | auto Scalars = InstsToScalarize.find(VF); | ||||
1337 | assert(Scalars != InstsToScalarize.end() && | ||||
1338 | "VF not yet analyzed for scalarization profitability"); | ||||
1339 | return Scalars->second.find(I) != Scalars->second.end(); | ||||
1340 | } | ||||
1341 | |||||
1342 | /// Returns true if \p I is known to be uniform after vectorization. | ||||
1343 | bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { | ||||
1344 | if (VF.isScalar()) | ||||
1345 | return true; | ||||
1346 | |||||
1347 | // Cost model is not run in the VPlan-native path - return conservative | ||||
1348 | // result until this changes. | ||||
1349 | if (EnableVPlanNativePath) | ||||
1350 | return false; | ||||
1351 | |||||
1352 | auto UniformsPerVF = Uniforms.find(VF); | ||||
1353 | assert(UniformsPerVF != Uniforms.end() && | ||||
1354 | "VF not yet analyzed for uniformity"); | ||||
1355 | return UniformsPerVF->second.count(I); | ||||
1356 | } | ||||
1357 | |||||
1358 | /// Returns true if \p I is known to be scalar after vectorization. | ||||
1359 | bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { | ||||
1360 | if (VF.isScalar()) | ||||
1361 | return true; | ||||
1362 | |||||
1363 | // Cost model is not run in the VPlan-native path - return conservative | ||||
1364 | // result until this changes. | ||||
1365 | if (EnableVPlanNativePath) | ||||
1366 | return false; | ||||
1367 | |||||
1368 | auto ScalarsPerVF = Scalars.find(VF); | ||||
1369 | assert(ScalarsPerVF != Scalars.end() && | ||||
1370 | "Scalar values are not calculated for VF"); | ||||
1371 | return ScalarsPerVF->second.count(I); | ||||
1372 | } | ||||
1373 | |||||
1374 | /// \returns True if instruction \p I can be truncated to a smaller bitwidth | ||||
1375 | /// for vectorization factor \p VF. | ||||
1376 | bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { | ||||
1377 | return VF.isVector() && MinBWs.find(I) != MinBWs.end() && | ||||
1378 | !isProfitableToScalarize(I, VF) && | ||||
1379 | !isScalarAfterVectorization(I, VF); | ||||
1380 | } | ||||
1381 | |||||
1382 | /// Decision that was taken during cost calculation for memory instruction. | ||||
1383 | enum InstWidening { | ||||
1384 | CM_Unknown, | ||||
1385 | CM_Widen, // For consecutive accesses with stride +1. | ||||
1386 | CM_Widen_Reverse, // For consecutive accesses with stride -1. | ||||
1387 | CM_Interleave, | ||||
1388 | CM_GatherScatter, | ||||
1389 | CM_Scalarize | ||||
1390 | }; | ||||
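| // Editorial note, not part of the original source: a rough mapping of access | ||||
| // patterns to these decisions: a load of a[i] with unit stride is CM_Widen, | ||||
| // a[N-i] is CM_Widen_Reverse, members of a group like a[2*i]/a[2*i+1] may be | ||||
| // CM_Interleave, an indexed access a[b[i]] is a CM_GatherScatter candidate, | ||||
| // and accesses the target cannot widen fall back to CM_Scalarize. | ||||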
1391 | |||||
1392 | /// Save vectorization decision \p W and \p Cost taken by the cost model for | ||||
1393 | /// instruction \p I and vector width \p VF. | ||||
1394 | void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, | ||||
1395 | InstructionCost Cost) { | ||||
1396 | assert(VF.isVector() && "Expected VF >=2"); | ||||
1397 | WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); | ||||
1398 | } | ||||
1399 | |||||
1400 | /// Save vectorization decision \p W and \p Cost taken by the cost model for | ||||
1401 | /// interleaving group \p Grp and vector width \p VF. | ||||
1402 | void setWideningDecision(const InterleaveGroup<Instruction> *Grp, | ||||
1403 | ElementCount VF, InstWidening W, | ||||
1404 | InstructionCost Cost) { | ||||
1405 | assert(VF.isVector() && "Expected VF >=2"); | ||||
1406 | /// Broadcast this decision to all instructions inside the group. | ||||
1407 | /// But the cost will be assigned to one instruction only. | ||||
1408 | for (unsigned i = 0; i < Grp->getFactor(); ++i) { | ||||
1409 | if (auto *I = Grp->getMember(i)) { | ||||
1410 | if (Grp->getInsertPos() == I) | ||||
1411 | WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); | ||||
1412 | else | ||||
1413 | WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); | ||||
1414 | } | ||||
1415 | } | ||||
1416 | } | ||||
1417 | |||||
1418 | /// Return the cost model decision for the given instruction \p I and vector | ||||
1419 | /// width \p VF. Return CM_Unknown if this instruction did not pass | ||||
1420 | /// through the cost modeling. | ||||
1421 | InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { | ||||
1422 | assert(VF.isVector() && "Expected VF to be a vector VF"); | ||||
1423 | // Cost model is not run in the VPlan-native path - return conservative | ||||
1424 | // result until this changes. | ||||
1425 | if (EnableVPlanNativePath) | ||||
1426 | return CM_GatherScatter; | ||||
1427 | |||||
1428 | std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); | ||||
1429 | auto Itr = WideningDecisions.find(InstOnVF); | ||||
1430 | if (Itr == WideningDecisions.end()) | ||||
1431 | return CM_Unknown; | ||||
1432 | return Itr->second.first; | ||||
1433 | } | ||||
1434 | |||||
1435 | /// Return the vectorization cost for the given instruction \p I and vector | ||||
1436 | /// width \p VF. | ||||
1437 | InstructionCost getWideningCost(Instruction *I, ElementCount VF) { | ||||
1438 | assert(VF.isVector() && "Expected VF >=2"); | ||||
1439 | std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); | ||||
1440 | assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && | ||||
1441 | "The cost is not calculated"); | ||||
1442 | return WideningDecisions[InstOnVF].second; | ||||
1443 | } | ||||
1444 | |||||
1445 | /// Return True if instruction \p I is an optimizable truncate whose operand | ||||
1446 | /// is an induction variable. Such a truncate will be removed by adding a new | ||||
1447 | /// induction variable with the destination type. | ||||
1448 | bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { | ||||
1449 | // If the instruction is not a truncate, return false. | ||||
1450 | auto *Trunc = dyn_cast<TruncInst>(I); | ||||
1451 | if (!Trunc) | ||||
1452 | return false; | ||||
1453 | |||||
1454 | // Get the source and destination types of the truncate. | ||||
1455 | Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); | ||||
1456 | Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); | ||||
1457 | |||||
1458 | // If the truncate is free for the given types, return false. Replacing a | ||||
1459 | // free truncate with an induction variable would add an induction variable | ||||
1460 | // update instruction to each iteration of the loop. We exclude from this | ||||
1461 | // check the primary induction variable since it will need an update | ||||
1462 | // instruction regardless. | ||||
1463 | Value *Op = Trunc->getOperand(0); | ||||
1464 | if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) | ||||
1465 | return false; | ||||
1466 | |||||
1467 | // If the truncated value is not an induction variable, return false. | ||||
1468 | return Legal->isInductionPhi(Op); | ||||
1469 | } | ||||
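| // Editorial note, not part of the original source: the pattern targeted here | ||||
| // is roughly | ||||
| //   %iv   = phi i64 [ 0, %ph ], [ %iv.next, %latch ] | ||||
| //   %iv32 = trunc i64 %iv to i32 | ||||
| // where %iv32 can be replaced by a separate i32 induction variable, removing | ||||
| // the truncate from the vector body. The check above bails out when the | ||||
| // truncate is free anyway, since a new induction would only add an extra | ||||
| // per-iteration update. | ||||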
1470 | |||||
1471 | /// Collects the instructions to scalarize for each predicated instruction in | ||||
1472 | /// the loop. | ||||
1473 | void collectInstsToScalarize(ElementCount VF); | ||||
1474 | |||||
1475 | /// Collect Uniform and Scalar values for the given \p VF. | ||||
1476 | /// The sets depend on CM decision for Load/Store instructions | ||||
1477 | /// that may be vectorized as interleave, gather-scatter or scalarized. | ||||
1478 | void collectUniformsAndScalars(ElementCount VF) { | ||||
1479 | // Do the analysis once. | ||||
1480 | if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) | ||||
1481 | return; | ||||
1482 | setCostBasedWideningDecision(VF); | ||||
1483 | collectLoopUniforms(VF); | ||||
1484 | collectLoopScalars(VF); | ||||
1485 | } | ||||
1486 | |||||
1487 | /// Returns true if the target machine supports masked store operation | ||||
1488 | /// for the given \p DataType and kind of access to \p Ptr. | ||||
1489 | bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { | ||||
1490 | return Legal->isConsecutivePtr(Ptr) && | ||||
1491 | TTI.isLegalMaskedStore(DataType, Alignment); | ||||
1492 | } | ||||
1493 | |||||
1494 | /// Returns true if the target machine supports masked load operation | ||||
1495 | /// for the given \p DataType and kind of access to \p Ptr. | ||||
1496 | bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { | ||||
1497 | return Legal->isConsecutivePtr(Ptr) && | ||||
1498 | TTI.isLegalMaskedLoad(DataType, Alignment); | ||||
1499 | } | ||||
1500 | |||||
1501 | /// Returns true if the target machine can represent \p V as a masked gather | ||||
1502 | /// or scatter operation. | ||||
1503 | bool isLegalGatherOrScatter(Value *V) { | ||||
1504 | bool LI = isa<LoadInst>(V); | ||||
1505 | bool SI = isa<StoreInst>(V); | ||||
1506 | if (!LI && !SI) | ||||
1507 | return false; | ||||
1508 | auto *Ty = getLoadStoreType(V); | ||||
1509 | Align Align = getLoadStoreAlignment(V); | ||||
1510 | return (LI && TTI.isLegalMaskedGather(Ty, Align)) || | ||||
1511 | (SI && TTI.isLegalMaskedScatter(Ty, Align)); | ||||
1512 | } | ||||
1513 | |||||
1514 | /// Returns true if the target machine supports all of the reduction | ||||
1515 | /// variables found for the given VF. | ||||
1516 | bool canVectorizeReductions(ElementCount VF) { | ||||
1517 | return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { | ||||
1518 | const RecurrenceDescriptor &RdxDesc = Reduction.second; | ||||
1519 | return TTI.isLegalToVectorizeReduction(RdxDesc, VF); | ||||
1520 | })); | ||||
1521 | } | ||||
1522 | |||||
1523 | /// Returns true if \p I is an instruction that will be scalarized with | ||||
1524 | /// predication. Such instructions include conditional stores and | ||||
1525 | /// instructions that may divide by zero. | ||||
1526 | /// If a non-zero VF has been calculated, we check if I will be scalarized | ||||
1527 | /// with predication for that VF. | ||||
1528 | bool isScalarWithPredication(Instruction *I) const; | ||||
1529 | |||||
1530 | // Returns true if \p I is an instruction that will be predicated either | ||||
1531 | // through scalar predication or masked load/store or masked gather/scatter. | ||||
1532 | // Superset of instructions that return true for isScalarWithPredication. | ||||
1533 | bool isPredicatedInst(Instruction *I) { | ||||
1534 | if (!blockNeedsPredication(I->getParent())) | ||||
1535 | return false; | ||||
1536 | // Loads and stores that need some form of masked operation are predicated | ||||
1537 | // instructions. | ||||
1538 | if (isa<LoadInst>(I) || isa<StoreInst>(I)) | ||||
1539 | return Legal->isMaskRequired(I); | ||||
1540 | return isScalarWithPredication(I); | ||||
1541 | } | ||||
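| // Editorial note, not part of the original source: examples of predicated | ||||
| // instructions are a store guarded by `if (c[i])` in the source loop, which | ||||
| // needs a masked store or a scalarized conditional store, and a division | ||||
| // inside a conditional block, which must stay guarded so the vector code | ||||
| // cannot introduce a divide-by-zero trap. | ||||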
1542 | |||||
1543 | /// Returns true if \p I is a memory instruction with consecutive memory | ||||
1544 | /// access that can be widened. | ||||
1545 | bool | ||||
1546 | memoryInstructionCanBeWidened(Instruction *I, | ||||
1547 | ElementCount VF = ElementCount::getFixed(1)); | ||||
1548 | |||||
1549 | /// Returns true if \p I is a memory instruction in an interleaved-group | ||||
1550 | /// of memory accesses that can be vectorized with wide vector loads/stores | ||||
1551 | /// and shuffles. | ||||
1552 | bool | ||||
1553 | interleavedAccessCanBeWidened(Instruction *I, | ||||
1554 | ElementCount VF = ElementCount::getFixed(1)); | ||||
1555 | |||||
1556 | /// Check if \p Instr belongs to any interleaved access group. | ||||
1557 | bool isAccessInterleaved(Instruction *Instr) { | ||||
1558 | return InterleaveInfo.isInterleaved(Instr); | ||||
1559 | } | ||||
1560 | |||||
1561 | /// Get the interleaved access group that \p Instr belongs to. | ||||
1562 | const InterleaveGroup<Instruction> * | ||||
1563 | getInterleavedAccessGroup(Instruction *Instr) { | ||||
1564 | return InterleaveInfo.getInterleaveGroup(Instr); | ||||
1565 | } | ||||
1566 | |||||
1567 | /// Returns true if we're required to use a scalar epilogue for at least | ||||
1568 | /// the final iteration of the original loop. | ||||
1569 | bool requiresScalarEpilogue() const { | ||||
1570 | if (!isScalarEpilogueAllowed()) | ||||
1571 | return false; | ||||
1572 | // If we might exit from anywhere but the latch, must run the exiting | ||||
1573 | // iteration in scalar form. | ||||
1574 | if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) | ||||
1575 | return true; | ||||
1576 | return InterleaveInfo.requiresScalarEpilogue(); | ||||
1577 | } | ||||
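| // Editorial note, not part of the original source: the interleave-group case | ||||
| // covers groups with gaps, e.g. a loop that reads a[3*i] and a[3*i+1] but | ||||
| // never a[3*i+2]; peeling the last iteration(s) into a scalar epilogue keeps | ||||
| // the wide group load from reading past the end of the underlying data. | ||||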
1578 | |||||
1579 | /// Returns true if a scalar epilogue is allowed, i.e. not disallowed due to | ||||
1580 | /// optsize or a loop hint annotation. | ||||
1581 | bool isScalarEpilogueAllowed() const { | ||||
1582 | return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; | ||||
1583 | } | ||||
1584 | |||||
1585 | /// Returns true if all loop blocks should be masked to fold tail loop. | ||||
1586 | bool foldTailByMasking() const { return FoldTailByMasking; } | ||||
1587 | |||||
1588 | bool blockNeedsPredication(BasicBlock *BB) const { | ||||
1589 | return foldTailByMasking() || Legal->blockNeedsPredication(BB); | ||||
1590 | } | ||||
1591 | |||||
1592 | /// A SmallMapVector to store the InLoop reduction op chains, mapping phi | ||||
1593 | /// nodes to the chain of instructions representing the reductions. Uses a | ||||
1594 | /// MapVector to ensure deterministic iteration order. | ||||
1595 | using ReductionChainMap = | ||||
1596 | SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; | ||||
1597 | |||||
1598 | /// Return the chain of instructions representing an inloop reduction. | ||||
1599 | const ReductionChainMap &getInLoopReductionChains() const { | ||||
1600 | return InLoopReductionChains; | ||||
1601 | } | ||||
1602 | |||||
1603 | /// Returns true if the Phi is part of an inloop reduction. | ||||
1604 | bool isInLoopReduction(PHINode *Phi) const { | ||||
1605 | return InLoopReductionChains.count(Phi); | ||||
1606 | } | ||||
1607 | |||||
1608 | /// Estimate cost of an intrinsic call instruction CI if it were vectorized | ||||
1609 | /// with factor VF. Return the cost of the instruction, including | ||||
1610 | /// scalarization overhead if it's needed. | ||||
1611 | InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; | ||||
1612 | |||||
1613 | /// Estimate cost of a call instruction CI if it were vectorized with factor | ||||
1614 | /// VF. Return the cost of the instruction, including scalarization overhead | ||||
1615 | /// if it's needed. The flag NeedToScalarize shows if the call needs to be | ||||
1616 | /// scalarized - | ||||
1617 | /// i.e. either a vector version isn't available or it is too expensive. | ||||
1618 | InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, | ||||
1619 | bool &NeedToScalarize) const; | ||||
1620 | |||||
1621 | /// Returns true if the per-lane cost of VectorizationFactor A is lower than | ||||
1622 | /// that of B. | ||||
1623 | bool isMoreProfitable(const VectorizationFactor &A, | ||||
1624 | const VectorizationFactor &B) const; | ||||
1625 | |||||
1626 | /// Invalidates decisions already taken by the cost model. | ||||
1627 | void invalidateCostModelingDecisions() { | ||||
1628 | WideningDecisions.clear(); | ||||
1629 | Uniforms.clear(); | ||||
1630 | Scalars.clear(); | ||||
1631 | } | ||||
1632 | |||||
1633 | private: | ||||
1634 | unsigned NumPredStores = 0; | ||||
1635 | |||||
1636 | /// \return An upper bound for the vectorization factors for both | ||||
1637 | /// fixed and scalable vectorization, where the minimum-known number of | ||||
1638 | /// elements is a power-of-2 larger than zero. If scalable vectorization is | ||||
1639 | /// disabled or unsupported, then the scalable part will be equal to | ||||
1640 | /// ElementCount::getScalable(0). | ||||
1641 | FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount, | ||||
1642 | ElementCount UserVF); | ||||
1643 | |||||
1644 | /// \return the maximized element count based on the target's vector | ||||
1645 | /// registers and the loop trip-count, but limited to a maximum safe VF. | ||||
1646 | /// This is a helper function of computeFeasibleMaxVF. | ||||
1647 | /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure | ||||
1648 | /// issue that occurred on one of the buildbots which cannot be reproduced | ||||
1649 | /// without having access to the proprietary compiler (see comments on | ||||
1650 | /// D98509). The issue is currently under investigation and this workaround | ||||
1651 | /// will be removed as soon as possible. | ||||
1652 | ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, | ||||
1653 | unsigned SmallestType, | ||||
1654 | unsigned WidestType, | ||||
1655 | const ElementCount &MaxSafeVF); | ||||
1656 | |||||
1657 | /// \return the maximum legal scalable VF, based on the safe max number | ||||
1658 | /// of elements. | ||||
1659 | ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements); | ||||
1660 | |||||
1661 | /// The vectorization cost is a combination of the cost itself and a boolean | ||||
1662 | /// indicating whether any of the contributing operations will actually | ||||
1663 | /// operate on vector values after type legalization in the backend. If this | ||||
1664 | /// latter value is false, then all operations will be scalarized (i.e. no | ||||
1665 | /// vectorization has actually taken place). | ||||
1666 | using VectorizationCostTy = std::pair<InstructionCost, bool>; | ||||
1667 | |||||
1668 | /// Returns the expected execution cost. The unit of the cost does | ||||
1669 | /// not matter because we use the 'cost' units to compare different | ||||
1670 | /// vector widths. The cost that is returned is *not* normalized by | ||||
1671 | /// the factor width. | ||||
1672 | VectorizationCostTy expectedCost(ElementCount VF); | ||||
1673 | |||||
1674 | /// Returns the execution time cost of an instruction for a given vector | ||||
1675 | /// width. Vector width of one means scalar. | ||||
1676 | VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); | ||||
1677 | |||||
1678 | /// The cost-computation logic from getInstructionCost which provides | ||||
1679 | /// the vector type as an output parameter. | ||||
1680 | InstructionCost getInstructionCost(Instruction *I, ElementCount VF, | ||||
1681 | Type *&VectorTy); | ||||
1682 | |||||
1683 | /// Return the cost of instructions in an inloop reduction pattern, if I is | ||||
1684 | /// part of that pattern. | ||||
1685 | InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF, | ||||
1686 | Type *VectorTy, | ||||
1687 | TTI::TargetCostKind CostKind); | ||||
1688 | |||||
1689 | /// Calculate vectorization cost of memory instruction \p I. | ||||
1690 | InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); | ||||
1691 | |||||
1692 | /// The cost computation for a scalarized memory instruction. | ||||
1693 | InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); | ||||
1694 | |||||
1695 | /// The cost computation for an interleaving group of memory instructions. | ||||
1696 | InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); | ||||
1697 | |||||
1698 | /// The cost computation for Gather/Scatter instruction. | ||||
1699 | InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); | ||||
1700 | |||||
1701 | /// The cost computation for widening instruction \p I with consecutive | ||||
1702 | /// memory access. | ||||
1703 | InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); | ||||
1704 | |||||
1705 | /// The cost calculation for Load/Store instruction \p I with uniform pointer - | ||||
1706 | /// Load: scalar load + broadcast. | ||||
1707 | /// Store: scalar store + (loop invariant value stored? 0 : extract of last | ||||
1708 | /// element) | ||||
1709 | InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); | ||||
1710 | |||||
1711 | /// Estimate the overhead of scalarizing an instruction. This is a | ||||
1712 | /// convenience wrapper for the type-based getScalarizationOverhead API. | ||||
1713 | InstructionCost getScalarizationOverhead(Instruction *I, | ||||
1714 | ElementCount VF) const; | ||||
1715 | |||||
1716 | /// Returns whether the instruction is a load or store and will be emitted | ||||
1717 | /// as a vector operation. | ||||
1718 | bool isConsecutiveLoadOrStore(Instruction *I); | ||||
1719 | |||||
1720 | /// Returns true if an artificially high cost for emulated masked memrefs | ||||
1721 | /// should be used. | ||||
1722 | bool useEmulatedMaskMemRefHack(Instruction *I); | ||||
1723 | |||||
1724 | /// Map of scalar integer values to the smallest bitwidth they can be legally | ||||
1725 | /// represented as. The vector equivalents of these values should be truncated | ||||
1726 | /// to this type. | ||||
1727 | MapVector<Instruction *, uint64_t> MinBWs; | ||||
1728 | |||||
1729 | /// A type representing the costs for instructions if they were to be | ||||
1730 | /// scalarized rather than vectorized. The entries are Instruction-Cost | ||||
1731 | /// pairs. | ||||
1732 | using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; | ||||
1733 | |||||
1734 | /// A set containing all BasicBlocks that are known to be present after | ||||
1735 | /// vectorization as predicated blocks. | ||||
1736 | SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; | ||||
1737 | |||||
1738 | /// Records whether it is allowed to have the original scalar loop execute at | ||||
1739 | /// least once. This may be needed as a fallback loop in case runtime | ||||
1740 | /// aliasing/dependence checks fail, or to handle the tail/remainder | ||||
1741 | /// iterations when the trip count is unknown or doesn't divide by the VF, | ||||
1742 | /// or as a peel-loop to handle gaps in interleave-groups. | ||||
1743 | /// Under optsize and when the trip count is very small we don't allow any | ||||
1744 | /// iterations to execute in the scalar loop. | ||||
1745 | ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; | ||||
1746 | |||||
1747 | /// All blocks of loop are to be masked to fold tail of scalar iterations. | ||||
1748 | bool FoldTailByMasking = false; | ||||
1749 | |||||
1750 | /// A map holding scalar costs for different vectorization factors. The | ||||
1751 | /// presence of a cost for an instruction in the mapping indicates that the | ||||
1752 | /// instruction will be scalarized when vectorizing with the associated | ||||
1753 | /// vectorization factor. The entries are VF-ScalarCostTy pairs. | ||||
1754 | DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; | ||||
1755 | |||||
1756 | /// Holds the instructions known to be uniform after vectorization. | ||||
1757 | /// The data is collected per VF. | ||||
1758 | DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; | ||||
1759 | |||||
1760 | /// Holds the instructions known to be scalar after vectorization. | ||||
1761 | /// The data is collected per VF. | ||||
1762 | DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; | ||||
1763 | |||||
1764 | /// Holds the instructions (address computations) that are forced to be | ||||
1765 | /// scalarized. | ||||
1766 | DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; | ||||
1767 | |||||
1768 | /// PHINodes of the reductions that should be expanded in-loop along with | ||||
1769 | /// their associated chains of reduction operations, in program order from top | ||||
1770 | /// (PHI) to bottom. | ||||
1771 | ReductionChainMap InLoopReductionChains; | ||||
1772 | |||||
1773 | /// A Map of inloop reduction operations and their immediate chain operand. | ||||
1774 | /// FIXME: This can be removed once reductions can be costed correctly in | ||||
1775 | /// vplan. This was added to allow quick lookup to the inloop operations, | ||||
1776 | /// without having to loop through InLoopReductionChains. | ||||
1777 | DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; | ||||
1778 | |||||
1779 | /// Returns the expected difference in cost from scalarizing the expression | ||||
1780 | /// feeding a predicated instruction \p PredInst. The instructions to | ||||
1781 | /// scalarize and their scalar costs are collected in \p ScalarCosts. A | ||||
1782 | /// non-negative return value implies the expression will be scalarized. | ||||
1783 | /// Currently, only single-use chains are considered for scalarization. | ||||
1784 | int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, | ||||
1785 | ElementCount VF); | ||||
1786 | |||||
1787 | /// Collect the instructions that are uniform after vectorization. An | ||||
1788 | /// instruction is uniform if we represent it with a single scalar value in | ||||
1789 | /// the vectorized loop corresponding to each vector iteration. Examples of | ||||
1790 | /// uniform instructions include pointer operands of consecutive or | ||||
1791 | /// interleaved memory accesses. Note that although uniformity implies an | ||||
1792 | /// instruction will be scalar, the reverse is not true. In general, a | ||||
1793 | /// scalarized instruction will be represented by VF scalar values in the | ||||
1794 | /// vectorized loop, each corresponding to an iteration of the original | ||||
1795 | /// scalar loop. | ||||
1796 | void collectLoopUniforms(ElementCount VF); | ||||
1797 | |||||
1798 | /// Collect the instructions that are scalar after vectorization. An | ||||
1799 | /// instruction is scalar if it is known to be uniform or will be scalarized | ||||
1800 | /// during vectorization. Non-uniform scalarized instructions will be | ||||
1801 | /// represented by VF values in the vectorized loop, each corresponding to an | ||||
1802 | /// iteration of the original scalar loop. | ||||
1803 | void collectLoopScalars(ElementCount VF); | ||||
1804 | |||||
1805 | /// Keeps cost model vectorization decision and cost for instructions. | ||||
1806 | /// Right now it is used for memory instructions only. | ||||
1807 | using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, | ||||
1808 | std::pair<InstWidening, InstructionCost>>; | ||||
1809 | |||||
1810 | DecisionList WideningDecisions; | ||||
1811 | |||||
1812 | /// Returns true if \p V is expected to be vectorized and it needs to be | ||||
1813 | /// extracted. | ||||
1814 | bool needsExtract(Value *V, ElementCount VF) const { | ||||
1815 | Instruction *I = dyn_cast<Instruction>(V); | ||||
1816 | if (VF.isScalar() || !I || !TheLoop->contains(I) || | ||||
1817 | TheLoop->isLoopInvariant(I)) | ||||
1818 | return false; | ||||
1819 | |||||
1820 | // Assume we can vectorize V (and hence we need extraction) if the | ||||
1821 | // scalars are not computed yet. This can happen, because it is called | ||||
1822 | // via getScalarizationOverhead from setCostBasedWideningDecision, before | ||||
1823 | // the scalars are collected. That should be a safe assumption in most | ||||
1824 | // cases, because we check if the operands have vectorizable types | ||||
1825 | // beforehand in LoopVectorizationLegality. | ||||
1826 | return Scalars.find(VF) == Scalars.end() || | ||||
1827 | !isScalarAfterVectorization(I, VF); | ||||
1828 | }; | ||||
1829 | |||||
1830 | /// Returns a range containing only operands needing to be extracted. | ||||
1831 | SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, | ||||
1832 | ElementCount VF) const { | ||||
1833 | return SmallVector<Value *, 4>(make_filter_range( | ||||
1834 | Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); | ||||
1835 | } | ||||
1836 | |||||
1837 | /// Determines if we have the infrastructure to vectorize loop \p L and its | ||||
1838 | /// epilogue, assuming the main loop is vectorized by \p VF. | ||||
1839 | bool isCandidateForEpilogueVectorization(const Loop &L, | ||||
1840 | const ElementCount VF) const; | ||||
1841 | |||||
1842 | /// Returns true if epilogue vectorization is considered profitable, and | ||||
1843 | /// false otherwise. | ||||
1844 | /// \p VF is the vectorization factor chosen for the original loop. | ||||
1845 | bool isEpilogueVectorizationProfitable(const ElementCount VF) const; | ||||
1846 | |||||
1847 | public: | ||||
1848 | /// The loop that we evaluate. | ||||
1849 | Loop *TheLoop; | ||||
1850 | |||||
1851 | /// Predicated scalar evolution analysis. | ||||
1852 | PredicatedScalarEvolution &PSE; | ||||
1853 | |||||
1854 | /// Loop Info analysis. | ||||
1855 | LoopInfo *LI; | ||||
1856 | |||||
1857 | /// Vectorization legality. | ||||
1858 | LoopVectorizationLegality *Legal; | ||||
1859 | |||||
1860 | /// Vector target information. | ||||
1861 | const TargetTransformInfo &TTI; | ||||
1862 | |||||
1863 | /// Target Library Info. | ||||
1864 | const TargetLibraryInfo *TLI; | ||||
1865 | |||||
1866 | /// Demanded bits analysis. | ||||
1867 | DemandedBits *DB; | ||||
1868 | |||||
1869 | /// Assumption cache. | ||||
1870 | AssumptionCache *AC; | ||||
1871 | |||||
1872 | /// Interface to emit optimization remarks. | ||||
1873 | OptimizationRemarkEmitter *ORE; | ||||
1874 | |||||
1875 | const Function *TheFunction; | ||||
1876 | |||||
1877 | /// Loop Vectorize Hint. | ||||
1878 | const LoopVectorizeHints *Hints; | ||||
1879 | |||||
1880 | /// The interleave access information contains groups of interleaved accesses | ||||
1881 | /// with the same stride and close to each other. | ||||
1882 | InterleavedAccessInfo &InterleaveInfo; | ||||
1883 | |||||
1884 | /// Values to ignore in the cost model. | ||||
1885 | SmallPtrSet<const Value *, 16> ValuesToIgnore; | ||||
1886 | |||||
1887 | /// Values to ignore in the cost model when VF > 1. | ||||
1888 | SmallPtrSet<const Value *, 16> VecValuesToIgnore; | ||||
1889 | |||||
1890 | /// Profitable vector factors. | ||||
1891 | SmallVector<VectorizationFactor, 8> ProfitableVFs; | ||||
1892 | }; | ||||
1893 | } // end namespace llvm | ||||
1894 | |||||
1895 | /// Helper struct to manage generating runtime checks for vectorization. | ||||
1896 | /// | ||||
1897 | /// The runtime checks are created up-front in temporary blocks to allow for | ||||
1898 | /// accurate cost estimation, un-linked from the existing IR. After deciding to | ||||
1899 | /// vectorize, the checks are moved back. If deciding not to vectorize, the | ||||
1900 | /// temporary blocks are completely removed. | ||||
1901 | class GeneratedRTChecks { | ||||
1902 | /// Basic block which contains the generated SCEV checks, if any. | ||||
1903 | BasicBlock *SCEVCheckBlock = nullptr; | ||||
1904 | |||||
1905 | /// The value representing the result of the generated SCEV checks. If it is | ||||
1906 | /// nullptr, either no SCEV checks have been generated or they have been used. | ||||
1907 | Value *SCEVCheckCond = nullptr; | ||||
1908 | |||||
1909 | /// Basic block which contains the generated memory runtime checks, if any. | ||||
1910 | BasicBlock *MemCheckBlock = nullptr; | ||||
1911 | |||||
1912 | /// The value representing the result of the generated memory runtime checks. | ||||
1913 | /// If it is nullptr, either no memory runtime checks have been generated or | ||||
1914 | /// they have been used. | ||||
1915 | Instruction *MemRuntimeCheckCond = nullptr; | ||||
1916 | |||||
1917 | DominatorTree *DT; | ||||
1918 | LoopInfo *LI; | ||||
1919 | |||||
1920 | SCEVExpander SCEVExp; | ||||
1921 | SCEVExpander MemCheckExp; | ||||
1922 | |||||
1923 | public: | ||||
1924 | GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, | ||||
1925 | const DataLayout &DL) | ||||
1926 | : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), | ||||
1927 | MemCheckExp(SE, DL, "scev.check") {} | ||||
1928 | |||||
1929 | /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can | ||||
1930 | /// accurately estimate the cost of the runtime checks. The blocks are | ||||
1931 | /// un-linked from the IR and added back during vector code generation. If | ||||
1932 | /// there is no vector code generation, the check blocks are removed | ||||
1933 | /// completely. | ||||
1934 | void Create(Loop *L, const LoopAccessInfo &LAI, | ||||
1935 | const SCEVUnionPredicate &UnionPred) { | ||||
1936 | |||||
1937 | BasicBlock *LoopHeader = L->getHeader(); | ||||
1938 | BasicBlock *Preheader = L->getLoopPreheader(); | ||||
1939 | |||||
1940 | // Use SplitBlock to create blocks for SCEV & memory runtime checks to | ||||
1941 | // ensure the blocks are properly added to LoopInfo & DominatorTree. Those | ||||
1942 | // may be used by SCEVExpander. The blocks will be un-linked from their | ||||
1943 | // predecessors and removed from LI & DT at the end of the function. | ||||
1944 | if (!UnionPred.isAlwaysTrue()) { | ||||
1945 | SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, | ||||
1946 | nullptr, "vector.scevcheck"); | ||||
1947 | |||||
1948 | SCEVCheckCond = SCEVExp.expandCodeForPredicate( | ||||
1949 | &UnionPred, SCEVCheckBlock->getTerminator()); | ||||
1950 | } | ||||
1951 | |||||
1952 | const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); | ||||
1953 | if (RtPtrChecking.Need) { | ||||
1954 | auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; | ||||
1955 | MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, | ||||
1956 | "vector.memcheck"); | ||||
1957 | |||||
1958 | std::tie(std::ignore, MemRuntimeCheckCond) = | ||||
1959 | addRuntimeChecks(MemCheckBlock->getTerminator(), L, | ||||
1960 | RtPtrChecking.getChecks(), MemCheckExp); | ||||
1961 | assert(MemRuntimeCheckCond && | ||||
1962 | "no RT checks generated although RtPtrChecking " | ||||
1963 | "claimed checks are required"); | ||||
1964 | } | ||||
1965 | |||||
1966 | if (!MemCheckBlock && !SCEVCheckBlock) | ||||
1967 | return; | ||||
1968 | |||||
1969 | // Unhook the temporary block with the checks, update various places | ||||
1970 | // accordingly. | ||||
1971 | if (SCEVCheckBlock) | ||||
1972 | SCEVCheckBlock->replaceAllUsesWith(Preheader); | ||||
1973 | if (MemCheckBlock) | ||||
1974 | MemCheckBlock->replaceAllUsesWith(Preheader); | ||||
1975 | |||||
1976 | if (SCEVCheckBlock) { | ||||
1977 | SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); | ||||
1978 | new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); | ||||
1979 | Preheader->getTerminator()->eraseFromParent(); | ||||
1980 | } | ||||
1981 | if (MemCheckBlock) { | ||||
1982 | MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); | ||||
1983 | new UnreachableInst(Preheader->getContext(), MemCheckBlock); | ||||
1984 | Preheader->getTerminator()->eraseFromParent(); | ||||
1985 | } | ||||
1986 | |||||
1987 | DT->changeImmediateDominator(LoopHeader, Preheader); | ||||
1988 | if (MemCheckBlock) { | ||||
1989 | DT->eraseNode(MemCheckBlock); | ||||
1990 | LI->removeBlock(MemCheckBlock); | ||||
1991 | } | ||||
1992 | if (SCEVCheckBlock) { | ||||
1993 | DT->eraseNode(SCEVCheckBlock); | ||||
1994 | LI->removeBlock(SCEVCheckBlock); | ||||
1995 | } | ||||
1996 | } | ||||
1997 | |||||
1998 | /// Remove the created SCEV & memory runtime check blocks & instructions, if | ||||
1999 | /// unused. | ||||
2000 | ~GeneratedRTChecks() { | ||||
2001 | SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); | ||||
2002 | SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); | ||||
2003 | if (!SCEVCheckCond) | ||||
2004 | SCEVCleaner.markResultUsed(); | ||||
2005 | |||||
2006 | if (!MemRuntimeCheckCond) | ||||
2007 | MemCheckCleaner.markResultUsed(); | ||||
2008 | |||||
2009 | if (MemRuntimeCheckCond) { | ||||
2010 | auto &SE = *MemCheckExp.getSE(); | ||||
2011 | // Memory runtime check generation creates compares that use expanded | ||||
2012 | // values. Remove them before running the SCEVExpanderCleaners. | ||||
2013 | for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { | ||||
2014 | if (MemCheckExp.isInsertedInstruction(&I)) | ||||
2015 | continue; | ||||
2016 | SE.forgetValue(&I); | ||||
2017 | SE.eraseValueFromMap(&I); | ||||
2018 | I.eraseFromParent(); | ||||
2019 | } | ||||
2020 | } | ||||
2021 | MemCheckCleaner.cleanup(); | ||||
2022 | SCEVCleaner.cleanup(); | ||||
2023 | |||||
2024 | if (SCEVCheckCond) | ||||
2025 | SCEVCheckBlock->eraseFromParent(); | ||||
2026 | if (MemRuntimeCheckCond) | ||||
2027 | MemCheckBlock->eraseFromParent(); | ||||
2028 | } | ||||
2029 | |||||
2030 | /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and | ||||
2031 | /// adjusts the branches to branch to the vector preheader or \p Bypass, | ||||
2032 | /// depending on the generated condition. | ||||
2033 | BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, | ||||
2034 | BasicBlock *LoopVectorPreHeader, | ||||
2035 | BasicBlock *LoopExitBlock) { | ||||
2036 | if (!SCEVCheckCond) | ||||
2037 | return nullptr; | ||||
2038 | if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) | ||||
2039 | if (C->isZero()) | ||||
2040 | return nullptr; | ||||
2041 | |||||
2042 | auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); | ||||
2043 | |||||
2044 | BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); | ||||
2045 | // Create new preheader for vector loop. | ||||
2046 | if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) | ||||
2047 | PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); | ||||
2048 | |||||
2049 | SCEVCheckBlock->getTerminator()->eraseFromParent(); | ||||
2050 | SCEVCheckBlock->moveBefore(LoopVectorPreHeader); | ||||
2051 | Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, | ||||
2052 | SCEVCheckBlock); | ||||
2053 | |||||
2054 | DT->addNewBlock(SCEVCheckBlock, Pred); | ||||
2055 | DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); | ||||
2056 | |||||
2057 | ReplaceInstWithInst( | ||||
2058 | SCEVCheckBlock->getTerminator(), | ||||
2059 | BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); | ||||
2060 | // Mark the check as used, to prevent it from being removed during cleanup. | ||||
2061 | SCEVCheckCond = nullptr; | ||||
2062 | return SCEVCheckBlock; | ||||
2063 | } | ||||
2064 | |||||
2065 | /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts | ||||
2066 | /// the branches to branch to the vector preheader or \p Bypass, depending on | ||||
2067 | /// the generated condition. | ||||
2068 | BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, | ||||
2069 | BasicBlock *LoopVectorPreHeader) { | ||||
2070 | // Check if we generated code that checks in runtime if arrays overlap. | ||||
2071 | if (!MemRuntimeCheckCond) | ||||
2072 | return nullptr; | ||||
2073 | |||||
2074 | auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); | ||||
2075 | Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, | ||||
2076 | MemCheckBlock); | ||||
2077 | |||||
2078 | DT->addNewBlock(MemCheckBlock, Pred); | ||||
2079 | DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); | ||||
2080 | MemCheckBlock->moveBefore(LoopVectorPreHeader); | ||||
2081 | |||||
2082 | if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) | ||||
2083 | PL->addBasicBlockToLoop(MemCheckBlock, *LI); | ||||
2084 | |||||
2085 | ReplaceInstWithInst( | ||||
2086 | MemCheckBlock->getTerminator(), | ||||
2087 | BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); | ||||
2088 | MemCheckBlock->getTerminator()->setDebugLoc( | ||||
2089 | Pred->getTerminator()->getDebugLoc()); | ||||
2090 | |||||
2091 | // Mark the check as used, to prevent it from being removed during cleanup. | ||||
2092 | MemRuntimeCheckCond = nullptr; | ||||
2093 | return MemCheckBlock; | ||||
2094 | } | ||||
2095 | }; | ||||
2096 | |||||
2097 | // Return true if \p OuterLp is an outer loop annotated with hints for explicit | ||||
2098 | // vectorization. The loop needs to be annotated with #pragma omp simd | ||||
2099 | // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the | ||||
2100 | // vector length information is not provided, vectorization is not considered | ||||
2101 | // explicit. Interleave hints are not allowed either. These limitations will be | ||||
2102 | // relaxed in the future. | ||||
2103 | // Please note that we are currently forced to abuse the pragma 'clang | ||||
2104 | // vectorize' semantics. This pragma provides *auto-vectorization hints* | ||||
2105 | // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' | ||||
2106 | // provides *explicit vectorization hints* (LV can bypass legal checks and | ||||
2107 | // assume that vectorization is legal). However, both hints are implemented | ||||
2108 | // using the same metadata (llvm.loop.vectorize, processed by | ||||
2109 | // LoopVectorizeHints). This will be fixed in the future when the native IR | ||||
2110 | // representation for pragma 'omp simd' is introduced. | ||||
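// For example (illustrative annotations, not taken from a particular test),
// an outer loop preceded by
//   #pragma omp simd simdlen(4)
// or
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// carries the vector length information required here, whereas, per the
// comment above, a bare vectorize(enable) without a width is not treated as
// explicit vectorization.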
2111 | static bool isExplicitVecOuterLoop(Loop *OuterLp, | ||||
2112 | OptimizationRemarkEmitter *ORE) { | ||||
2113 | assert(!OuterLp->isInnermost() && "This is not an outer loop"); | ||||
2114 | LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); | ||||
2115 | |||||
2116 | // Only outer loops with an explicit vectorization hint are supported. | ||||
2117 | // Unannotated outer loops are ignored. | ||||
2118 | if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) | ||||
2119 | return false; | ||||
2120 | |||||
2121 | Function *Fn = OuterLp->getHeader()->getParent(); | ||||
2122 | if (!Hints.allowVectorization(Fn, OuterLp, | ||||
2123 | true /*VectorizeOnlyWhenForced*/)) { | ||||
2124 | LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); | ||||
2125 | return false; | ||||
2126 | } | ||||
2127 | |||||
2128 | if (Hints.getInterleave() > 1) { | ||||
2129 | // TODO: Interleave support is future work. | ||||
2130 | LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " | ||||
2131 | "outer loops.\n"); | ||||
2132 | Hints.emitRemarkWithHints(); | ||||
2133 | return false; | ||||
2134 | } | ||||
2135 | |||||
2136 | return true; | ||||
2137 | } | ||||
2138 | |||||
2139 | static void collectSupportedLoops(Loop &L, LoopInfo *LI, | ||||
2140 | OptimizationRemarkEmitter *ORE, | ||||
2141 | SmallVectorImpl<Loop *> &V) { | ||||
2142 | // Collect inner loops and outer loops without irreducible control flow. For | ||||
2143 | // now, only collect outer loops that have explicit vectorization hints. If we | ||||
2144 | // are stress testing the VPlan H-CFG construction, we collect the outermost | ||||
2145 | // loop of every loop nest. | ||||
2146 | if (L.isInnermost() || VPlanBuildStressTest || | ||||
2147 | (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { | ||||
2148 | LoopBlocksRPO RPOT(&L); | ||||
2149 | RPOT.perform(LI); | ||||
2150 | if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { | ||||
2151 | V.push_back(&L); | ||||
2152 | // TODO: Collect inner loops inside marked outer loops in case | ||||
2153 | // vectorization fails for the outer loop. Do not invoke | ||||
2154 | // 'containsIrreducibleCFG' again for inner loops when the outer loop is | ||||
2155 | // already known to be reducible. We can use an inherited attribute for | ||||
2156 | // that. | ||||
2157 | return; | ||||
2158 | } | ||||
2159 | } | ||||
2160 | for (Loop *InnerL : L) | ||||
2161 | collectSupportedLoops(*InnerL, LI, ORE, V); | ||||
2162 | } | ||||
2163 | |||||
2164 | namespace { | ||||
2165 | |||||
2166 | /// The LoopVectorize Pass. | ||||
2167 | struct LoopVectorize : public FunctionPass { | ||||
2168 | /// Pass identification, replacement for typeid | ||||
2169 | static char ID; | ||||
2170 | |||||
2171 | LoopVectorizePass Impl; | ||||
2172 | |||||
2173 | explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, | ||||
2174 | bool VectorizeOnlyWhenForced = false) | ||||
2175 | : FunctionPass(ID), | ||||
2176 | Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { | ||||
2177 | initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); | ||||
2178 | } | ||||
2179 | |||||
2180 | bool runOnFunction(Function &F) override { | ||||
2181 | if (skipFunction(F)) | ||||
2182 | return false; | ||||
2183 | |||||
2184 | auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); | ||||
2185 | auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | ||||
2186 | auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | ||||
2187 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||
2188 | auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); | ||||
2189 | auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); | ||||
2190 | auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; | ||||
2191 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||
2192 | auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | ||||
2193 | auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); | ||||
2194 | auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); | ||||
2195 | auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); | ||||
2196 | auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); | ||||
2197 | |||||
2198 | std::function<const LoopAccessInfo &(Loop &)> GetLAA = | ||||
2199 | [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; | ||||
2200 | |||||
2201 | return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, | ||||
2202 | GetLAA, *ORE, PSI).MadeAnyChange; | ||||
2203 | } | ||||
2204 | |||||
2205 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
2206 | AU.addRequired<AssumptionCacheTracker>(); | ||||
2207 | AU.addRequired<BlockFrequencyInfoWrapperPass>(); | ||||
2208 | AU.addRequired<DominatorTreeWrapperPass>(); | ||||
2209 | AU.addRequired<LoopInfoWrapperPass>(); | ||||
2210 | AU.addRequired<ScalarEvolutionWrapperPass>(); | ||||
2211 | AU.addRequired<TargetTransformInfoWrapperPass>(); | ||||
2212 | AU.addRequired<AAResultsWrapperPass>(); | ||||
2213 | AU.addRequired<LoopAccessLegacyAnalysis>(); | ||||
2214 | AU.addRequired<DemandedBitsWrapperPass>(); | ||||
2215 | AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); | ||||
2216 | AU.addRequired<InjectTLIMappingsLegacy>(); | ||||
2217 | |||||
2218 | // We currently do not preserve loopinfo/dominator analyses with outer loop | ||||
2219 | // vectorization. Until this is addressed, mark these analyses as preserved | ||||
2220 | // only for non-VPlan-native path. | ||||
2221 | // TODO: Preserve Loop and Dominator analyses for VPlan-native path. | ||||
2222 | if (!EnableVPlanNativePath) { | ||||
2223 | AU.addPreserved<LoopInfoWrapperPass>(); | ||||
2224 | AU.addPreserved<DominatorTreeWrapperPass>(); | ||||
2225 | } | ||||
2226 | |||||
2227 | AU.addPreserved<BasicAAWrapperPass>(); | ||||
2228 | AU.addPreserved<GlobalsAAWrapperPass>(); | ||||
2229 | AU.addRequired<ProfileSummaryInfoWrapperPass>(); | ||||
2230 | } | ||||
2231 | }; | ||||
2232 | |||||
2233 | } // end anonymous namespace | ||||
2234 | |||||
2235 | //===----------------------------------------------------------------------===// | ||||
2236 | // Implementation of LoopVectorizationLegality, InnerLoopVectorizer, | ||||
2237 | // LoopVectorizationCostModel and LoopVectorizationPlanner. | ||||
2238 | //===----------------------------------------------------------------------===// | ||||
2239 | |||||
2240 | Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { | ||||
2241 | // We need to place the broadcast of invariant variables outside the loop, | ||||
2242 | // but only if it's proven safe to do so. Otherwise, the broadcast will be | ||||
2243 | // emitted inside the vector loop body. | ||||
2244 | Instruction *Instr = dyn_cast<Instruction>(V); | ||||
2245 | bool SafeToHoist = OrigLoop->isLoopInvariant(V) && | ||||
2246 | (!Instr || | ||||
2247 | DT->dominates(Instr->getParent(), LoopVectorPreHeader)); | ||||
2248 | // Place the code for broadcasting invariant variables in the new preheader. | ||||
2249 | IRBuilder<>::InsertPointGuard Guard(Builder); | ||||
2250 | if (SafeToHoist) | ||||
2251 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | ||||
2252 | |||||
2253 | // Broadcast the scalar into all locations in the vector. | ||||
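// For example, splatting a loop-invariant i32 %x at VF = 4 produces IR
// roughly like (names illustrative):
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer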
2254 | Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); | ||||
2255 | |||||
2256 | return Shuf; | ||||
2257 | } | ||||
2258 | |||||
2259 | void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( | ||||
2260 | const InductionDescriptor &II, Value *Step, Value *Start, | ||||
2261 | Instruction *EntryVal, VPValue *Def, VPValue *CastDef, | ||||
2262 | VPTransformState &State) { | ||||
2263 | assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && | ||||
2264 | "Expected either an induction phi-node or a truncate of it!"); | ||||
2265 | |||||
2266 | // Construct the initial value of the vector IV in the vector loop preheader | ||||
2267 | auto CurrIP = Builder.saveIP(); | ||||
2268 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | ||||
2269 | if (isa<TruncInst>(EntryVal)) { | ||||
2270 | assert(Start->getType()->isIntegerTy() && | ||||
2271 | "Truncation requires an integer type"); | ||||
2272 | auto *TruncType = cast<IntegerType>(EntryVal->getType()); | ||||
2273 | Step = Builder.CreateTrunc(Step, TruncType); | ||||
2274 | Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); | ||||
2275 | } | ||||
2276 | Value *SplatStart = Builder.CreateVectorSplat(VF, Start); | ||||
2277 | Value *SteppedStart = | ||||
2278 | getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); | ||||
2279 | |||||
2280 | // We create vector phi nodes for both integer and floating-point induction | ||||
2281 | // variables. Here, we determine the kind of arithmetic we will perform. | ||||
2282 | Instruction::BinaryOps AddOp; | ||||
2283 | Instruction::BinaryOps MulOp; | ||||
2284 | if (Step->getType()->isIntegerTy()) { | ||||
2285 | AddOp = Instruction::Add; | ||||
2286 | MulOp = Instruction::Mul; | ||||
2287 | } else { | ||||
2288 | AddOp = II.getInductionOpcode(); | ||||
2289 | MulOp = Instruction::FMul; | ||||
2290 | } | ||||
2291 | |||||
2292 | // Multiply the vectorization factor by the step using integer or | ||||
2293 | // floating-point arithmetic as appropriate. | ||||
2294 | Type *StepType = Step->getType(); | ||||
2295 | if (Step->getType()->isFloatingPointTy()) | ||||
2296 | StepType = IntegerType::get(StepType->getContext(), | ||||
2297 | StepType->getScalarSizeInBits()); | ||||
2298 | Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF); | ||||
2299 | if (Step->getType()->isFloatingPointTy()) | ||||
2300 | RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType()); | ||||
2301 | Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); | ||||
2302 | |||||
2303 | // Create a vector splat to use in the induction update. | ||||
2304 | // | ||||
2305 | // FIXME: If the step is non-constant, we create the vector splat with | ||||
2306 | // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't | ||||
2307 | // handle a constant vector splat. | ||||
2308 | Value *SplatVF = isa<Constant>(Mul) | ||||
2309 | ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) | ||||
2310 | : Builder.CreateVectorSplat(VF, Mul); | ||||
2311 | Builder.restoreIP(CurrIP); | ||||
2312 | |||||
2313 | // We may need to add the step a number of times, depending on the unroll | ||||
2314 | // factor. The last of those goes into the PHI. | ||||
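// For instance (a fixed-width sketch), with VF = 4, UF = 2 and an integer
// step of 1, the code below yields:
//   %vec.ind      = phi <4 x i32> [ <0,1,2,3>, %preheader ], [ %vec.ind.next, %latch ]
//   part 0        = %vec.ind
//   part 1        = %step.add = %vec.ind + <4,4,4,4>
//   %vec.ind.next = %step.add + <4,4,4,4>   ; i.e. %vec.ind + <8,8,8,8>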
2315 | PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", | ||||
2316 | &*LoopVectorBody->getFirstInsertionPt()); | ||||
2317 | VecInd->setDebugLoc(EntryVal->getDebugLoc()); | ||||
2318 | Instruction *LastInduction = VecInd; | ||||
2319 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
2320 | State.set(Def, LastInduction, Part); | ||||
2321 | |||||
2322 | if (isa<TruncInst>(EntryVal)) | ||||
2323 | addMetadata(LastInduction, EntryVal); | ||||
2324 | recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, | ||||
2325 | State, Part); | ||||
2326 | |||||
2327 | LastInduction = cast<Instruction>( | ||||
2328 | Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); | ||||
2329 | LastInduction->setDebugLoc(EntryVal->getDebugLoc()); | ||||
2330 | } | ||||
2331 | |||||
2332 | // Move the last step to the end of the latch block. This ensures consistent | ||||
2333 | // placement of all induction updates. | ||||
2334 | auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); | ||||
2335 | auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); | ||||
2336 | auto *ICmp = cast<Instruction>(Br->getCondition()); | ||||
2337 | LastInduction->moveBefore(ICmp); | ||||
2338 | LastInduction->setName("vec.ind.next"); | ||||
2339 | |||||
2340 | VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); | ||||
2341 | VecInd->addIncoming(LastInduction, LoopVectorLatch); | ||||
2342 | } | ||||
2343 | |||||
2344 | bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { | ||||
2345 | return Cost->isScalarAfterVectorization(I, VF) || | ||||
2346 | Cost->isProfitableToScalarize(I, VF); | ||||
2347 | } | ||||
2348 | |||||
2349 | bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { | ||||
2350 | if (shouldScalarizeInstruction(IV)) | ||||
2351 | return true; | ||||
2352 | auto isScalarInst = [&](User *U) -> bool { | ||||
2353 | auto *I = cast<Instruction>(U); | ||||
2354 | return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); | ||||
2355 | }; | ||||
2356 | return llvm::any_of(IV->users(), isScalarInst); | ||||
2357 | } | ||||
2358 | |||||
2359 | void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( | ||||
2360 | const InductionDescriptor &ID, const Instruction *EntryVal, | ||||
2361 | Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, | ||||
2362 | unsigned Part, unsigned Lane) { | ||||
2363 | assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && | ||||
2364 | "Expected either an induction phi-node or a truncate of it!"); | ||||
2365 | |||||
2366 | // This induction variable is not the phi from the original loop but the | ||||
2367 | // newly-created IV, based on the proof that the casted Phi is equal to the | ||||
2368 | // uncasted Phi in the vectorized loop (possibly under a runtime guard). It | ||||
2369 | // re-uses the same InductionDescriptor as the original IV, but we don't | ||||
2370 | // have to do any recording in this case - that is done when the original IV | ||||
2371 | // is processed. | ||||
2372 | if (isa<TruncInst>(EntryVal)) | ||||
2373 | return; | ||||
2374 | |||||
2375 | const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); | ||||
2376 | if (Casts.empty()) | ||||
2377 | return; | ||||
2378 | // Only the first Cast instruction in the Casts vector is of interest. | ||||
2379 | // The rest of the Casts (if they exist) have no uses outside the | ||||
2380 | // induction update chain itself. | ||||
2381 | if (Lane < UINT_MAX) | ||||
2382 | State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); | ||||
2383 | else | ||||
2384 | State.set(CastDef, VectorLoopVal, Part); | ||||
2385 | } | ||||
2386 | |||||
2387 | void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, | ||||
2388 | TruncInst *Trunc, VPValue *Def, | ||||
2389 | VPValue *CastDef, | ||||
2390 | VPTransformState &State) { | ||||
2391 | assert((IV->getType()->isIntegerTy() || IV != OldInduction) && | ||||
2392 | "Primary induction variable must have an integer type"); | ||||
2393 | |||||
2394 | auto II = Legal->getInductionVars().find(IV); | ||||
2395 | assert(II != Legal->getInductionVars().end() && "IV is not an induction"); | ||||
2396 | |||||
2397 | auto ID = II->second; | ||||
2398 | assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); | ||||
2399 | |||||
2400 | // The value from the original loop to which we are mapping the new induction | ||||
2401 | // variable. | ||||
2402 | Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; | ||||
2403 | |||||
2404 | auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); | ||||
2405 | |||||
2406 | // Generate code for the induction step. Note that induction steps are | ||||
2407 | // required to be loop-invariant | ||||
2408 | auto CreateStepValue = [&](const SCEV *Step) -> Value * { | ||||
2409 | assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && | ||||
2410 | "Induction step should be loop invariant"); | ||||
2411 | if (PSE.getSE()->isSCEVable(IV->getType())) { | ||||
2412 | SCEVExpander Exp(*PSE.getSE(), DL, "induction"); | ||||
2413 | return Exp.expandCodeFor(Step, Step->getType(), | ||||
2414 | LoopVectorPreHeader->getTerminator()); | ||||
2415 | } | ||||
2416 | return cast<SCEVUnknown>(Step)->getValue(); | ||||
2417 | }; | ||||
2418 | |||||
2419 | // The scalar value to broadcast. This is derived from the canonical | ||||
2420 | // induction variable. If a truncation type is given, truncate the canonical | ||||
2421 | // induction variable and step. Otherwise, derive these values from the | ||||
2422 | // induction descriptor. | ||||
2423 | auto CreateScalarIV = [&](Value *&Step) -> Value * { | ||||
2424 | Value *ScalarIV = Induction; | ||||
2425 | if (IV != OldInduction) { | ||||
2426 | ScalarIV = IV->getType()->isIntegerTy() | ||||
2427 | ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) | ||||
2428 | : Builder.CreateCast(Instruction::SIToFP, Induction, | ||||
2429 | IV->getType()); | ||||
2430 | ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); | ||||
2431 | ScalarIV->setName("offset.idx"); | ||||
2432 | } | ||||
2433 | if (Trunc) { | ||||
2434 | auto *TruncType = cast<IntegerType>(Trunc->getType()); | ||||
2435 | assert(Step->getType()->isIntegerTy() && | ||||
2436 | "Truncation requires an integer step"); | ||||
2437 | ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); | ||||
2438 | Step = Builder.CreateTrunc(Step, TruncType); | ||||
2439 | } | ||||
2440 | return ScalarIV; | ||||
2441 | }; | ||||
2442 | |||||
2443 | // Create the vector values from the scalar IV, for the case where no vector | ||||
2444 | // IV is created. | ||||
2445 | auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { | ||||
2446 | Value *Broadcasted = getBroadcastInstrs(ScalarIV); | ||||
2447 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
2448 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | ||||
2449 | Value *EntryPart = | ||||
2450 | getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, | ||||
2451 | ID.getInductionOpcode()); | ||||
2452 | State.set(Def, EntryPart, Part); | ||||
2453 | if (Trunc) | ||||
2454 | addMetadata(EntryPart, Trunc); | ||||
2455 | recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, | ||||
2456 | State, Part); | ||||
2457 | } | ||||
2458 | }; | ||||
2459 | |||||
2460 | // Fast-math-flags propagate from the original induction instruction. | ||||
2461 | IRBuilder<>::FastMathFlagGuard FMFG(Builder); | ||||
2462 | if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) | ||||
2463 | Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); | ||||
2464 | |||||
2465 | // Now do the actual transformations, and start with creating the step value. | ||||
2466 | Value *Step = CreateStepValue(ID.getStep()); | ||||
2467 | if (VF.isZero() || VF.isScalar()) { | ||||
2468 | Value *ScalarIV = CreateScalarIV(Step); | ||||
2469 | CreateSplatIV(ScalarIV, Step); | ||||
2470 | return; | ||||
2471 | } | ||||
2472 | |||||
2473 | // Determine if we want a scalar version of the induction variable. This is | ||||
2474 | // true if the induction variable itself is not widened, or if it has at | ||||
2475 | // least one user in the loop that is not widened. | ||||
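// In short, the cases handled below are: no scalar users -> create only a
// vector IV; the IV is still worth widening but has scalar users -> create
// the vector IV plus scalar steps; the IV itself is scalarized -> emit only
// the scalar steps (plus a splat IV when tail-folding, to feed the predicate
// of the masked loads/stores).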
2476 | auto NeedsScalarIV = needsScalarInduction(EntryVal); | ||||
2477 | if (!NeedsScalarIV) { | ||||
2478 | createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, | ||||
2479 | State); | ||||
2480 | return; | ||||
2481 | } | ||||
2482 | |||||
2483 | // Try to create a new independent vector induction variable. If we can't | ||||
2484 | // create the phi node, we will splat the scalar induction variable in each | ||||
2485 | // loop iteration. | ||||
2486 | if (!shouldScalarizeInstruction(EntryVal)) { | ||||
2487 | createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, | ||||
2488 | State); | ||||
2489 | Value *ScalarIV = CreateScalarIV(Step); | ||||
2490 | // Create scalar steps that can be used by instructions we will later | ||||
2491 | // scalarize. Note that the addition of the scalar steps will not increase | ||||
2492 | // the number of instructions in the loop in the common case prior to | ||||
2493 | // InstCombine. We will be trading one vector extract for each scalar step. | ||||
2494 | buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); | ||||
2495 | return; | ||||
2496 | } | ||||
2497 | |||||
2498 | // All IV users are scalar instructions, so only emit a scalar IV, not a | ||||
2499 | // vectorised IV. The exception is tail-folding: there the splat IV feeds the | ||||
2500 | // predicate used by the masked loads/stores. | ||||
2501 | Value *ScalarIV = CreateScalarIV(Step); | ||||
2502 | if (!Cost->isScalarEpilogueAllowed()) | ||||
2503 | CreateSplatIV(ScalarIV, Step); | ||||
2504 | buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); | ||||
2505 | } | ||||
2506 | |||||
2507 | Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, | ||||
2508 | Instruction::BinaryOps BinOp) { | ||||
2509 | // Create and check the types. | ||||
2510 | auto *ValVTy = cast<VectorType>(Val->getType()); | ||||
2511 | ElementCount VLen = ValVTy->getElementCount(); | ||||
2512 | |||||
2513 | Type *STy = Val->getType()->getScalarType(); | ||||
2514 | assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && | ||||
2515 | "Induction Step must be an integer or FP"); | ||||
2516 | assert(Step->getType() == STy && "Step has wrong type"); | ||||
2517 | |||||
2518 | SmallVector<Constant *, 8> Indices; | ||||
2519 | |||||
2520 | // Create a vector of consecutive numbers from zero to VF. | ||||
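// For example, with VF = 4, StartIdx = 0 and step %s, the integer path below
// computes Val + <0,1,2,3> * %s; for FP inductions the multiply/accumulate is
// done with fmul and the requested FP opcode (FAdd or FSub) instead.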
2521 | VectorType *InitVecValVTy = ValVTy; | ||||
2522 | Type *InitVecValSTy = STy; | ||||
2523 | if (STy->isFloatingPointTy()) { | ||||
2524 | InitVecValSTy = | ||||
2525 | IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); | ||||
2526 | InitVecValVTy = VectorType::get(InitVecValSTy, VLen); | ||||
2527 | } | ||||
2528 | Value *InitVec = Builder.CreateStepVector(InitVecValVTy); | ||||
2529 | |||||
2530 | // Add on StartIdx | ||||
2531 | Value *StartIdxSplat = Builder.CreateVectorSplat( | ||||
2532 | VLen, ConstantInt::get(InitVecValSTy, StartIdx)); | ||||
2533 | InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); | ||||
2534 | |||||
2535 | if (STy->isIntegerTy()) { | ||||
2536 | Step = Builder.CreateVectorSplat(VLen, Step); | ||||
2537 | assert(Step->getType() == Val->getType() && "Invalid step vec"); | ||||
2538 | // FIXME: The newly created binary instructions should contain nsw/nuw flags, | ||||
2539 | // which can be found from the original scalar operations. | ||||
2540 | Step = Builder.CreateMul(InitVec, Step); | ||||
2541 | return Builder.CreateAdd(Val, Step, "induction"); | ||||
2542 | } | ||||
2543 | |||||
2544 | // Floating point induction. | ||||
2545 | assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && | ||||
2546 | "Binary Opcode should be specified for FP induction"); | ||||
2547 | InitVec = Builder.CreateUIToFP(InitVec, ValVTy); | ||||
2548 | Step = Builder.CreateVectorSplat(VLen, Step); | ||||
2549 | Value *MulOp = Builder.CreateFMul(InitVec, Step); | ||||
2550 | return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); | ||||
2551 | } | ||||
2552 | |||||
2553 | void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, | ||||
2554 | Instruction *EntryVal, | ||||
2555 | const InductionDescriptor &ID, | ||||
2556 | VPValue *Def, VPValue *CastDef, | ||||
2557 | VPTransformState &State) { | ||||
2558 | // We shouldn't have to build scalar steps if we aren't vectorizing. | ||||
2559 | assert(VF.isVector() && "VF should be greater than one"); | ||||
2560 | // Get the value type and ensure it and the step have the same integer type. | ||||
2561 | Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); | ||||
2562 | assert(ScalarIVTy == Step->getType() && | ||||
2563 | "Val and Step should have the same type"); | ||||
2564 | |||||
2565 | // We build scalar steps for both integer and floating-point induction | ||||
2566 | // variables. Here, we determine the kind of arithmetic we will perform. | ||||
2567 | Instruction::BinaryOps AddOp; | ||||
2568 | Instruction::BinaryOps MulOp; | ||||
2569 | if (ScalarIVTy->isIntegerTy()) { | ||||
2570 | AddOp = Instruction::Add; | ||||
2571 | MulOp = Instruction::Mul; | ||||
2572 | } else { | ||||
2573 | AddOp = ID.getInductionOpcode(); | ||||
2574 | MulOp = Instruction::FMul; | ||||
2575 | } | ||||
2576 | |||||
2577 | // Determine the number of scalars we need to generate for each unroll | ||||
2578 | // iteration. If EntryVal is uniform, we only need to generate the first | ||||
2579 | // lane. Otherwise, we generate all VF values. | ||||
2580 | bool IsUniform = | ||||
2581 | Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); | ||||
2582 | unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); | ||||
2583 | // Compute the scalar steps and save the results in State. | ||||
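// E.g. (fixed-width sketch) with VF = 4, UF = 1, scalar IV %iv and step %s,
// the loop below records the lane values %iv + 0*%s, %iv + 1*%s, %iv + 2*%s
// and %iv + 3*%s (only the lane-0 value when EntryVal is uniform).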
2584 | Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), | ||||
2585 | ScalarIVTy->getScalarSizeInBits()); | ||||
2586 | Type *VecIVTy = nullptr; | ||||
2587 | Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; | ||||
2588 | if (!IsUniform && VF.isScalable()) { | ||||
2589 | VecIVTy = VectorType::get(ScalarIVTy, VF); | ||||
2590 | UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); | ||||
2591 | SplatStep = Builder.CreateVectorSplat(VF, Step); | ||||
2592 | SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); | ||||
2593 | } | ||||
2594 | |||||
2595 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
2596 | Value *StartIdx0 = | ||||
2597 | createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); | ||||
2598 | |||||
2599 | if (!IsUniform && VF.isScalable()) { | ||||
2600 | auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); | ||||
2601 | auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); | ||||
2602 | if (ScalarIVTy->isFloatingPointTy()) | ||||
2603 | InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); | ||||
2604 | auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); | ||||
2605 | auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); | ||||
2606 | State.set(Def, Add, Part); | ||||
2607 | recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, | ||||
2608 | Part); | ||||
2609 | // It's useful to record the lane values too for the known minimum number | ||||
2610 | // of elements so we do those below. This improves the code quality when | ||||
2611 | // trying to extract the first element, for example. | ||||
2612 | } | ||||
2613 | |||||
2614 | if (ScalarIVTy->isFloatingPointTy()) | ||||
2615 | StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); | ||||
2616 | |||||
2617 | for (unsigned Lane = 0; Lane < Lanes; ++Lane) { | ||||
2618 | Value *StartIdx = Builder.CreateBinOp( | ||||
2619 | AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); | ||||
2620 | // The step returned by `createStepForVF` is a runtime-evaluated value | ||||
2621 | // when VF is scalable. Otherwise, it should be folded into a Constant. | ||||
2622 | assert((VF.isScalable() || isa<Constant>(StartIdx)) && | ||||
2623 | "Expected StartIdx to be folded to a constant when VF is not " | ||||
2624 | "scalable"); | ||||
2625 | auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); | ||||
2626 | auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); | ||||
2627 | State.set(Def, Add, VPIteration(Part, Lane)); | ||||
2628 | recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, | ||||
2629 | Part, Lane); | ||||
2630 | } | ||||
2631 | } | ||||
2632 | } | ||||
2633 | |||||
2634 | void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, | ||||
2635 | const VPIteration &Instance, | ||||
2636 | VPTransformState &State) { | ||||
2637 | Value *ScalarInst = State.get(Def, Instance); | ||||
2638 | Value *VectorValue = State.get(Def, Instance.Part); | ||||
2639 | VectorValue = Builder.CreateInsertElement( | ||||
2640 | VectorValue, ScalarInst, | ||||
2641 | Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); | ||||
2642 | State.set(Def, VectorValue, Instance.Part); | ||||
2643 | } | ||||
2644 | |||||
2645 | Value *InnerLoopVectorizer::reverseVector(Value *Vec) { | ||||
2646 | assert(Vec->getType()->isVectorTy() && "Invalid type"); | ||||
2647 | return Builder.CreateVectorReverse(Vec, "reverse"); | ||||
2648 | } | ||||
2649 | |||||
2650 | // Return whether we allow using masked interleave-groups (for dealing with | ||||
2651 | // strided loads/stores that reside in predicated blocks, or for dealing | ||||
2652 | // with gaps). | ||||
2653 | static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { | ||||
2654 | // If an override option has been passed in for interleaved accesses, use it. | ||||
2655 | if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) | ||||
2656 | return EnableMaskedInterleavedMemAccesses; | ||||
2657 | |||||
2658 | return TTI.enableMaskedInterleavedAccessVectorization(); | ||||
2659 | } | ||||
2660 | |||||
2661 | // Try to vectorize the interleave group that \p Instr belongs to. | ||||
2662 | // | ||||
2663 | // E.g. Translate following interleaved load group (factor = 3): | ||||
2664 | // for (i = 0; i < N; i+=3) { | ||||
2665 | // R = Pic[i]; // Member of index 0 | ||||
2666 | // G = Pic[i+1]; // Member of index 1 | ||||
2667 | // B = Pic[i+2]; // Member of index 2 | ||||
2668 | // ... // do something to R, G, B | ||||
2669 | // } | ||||
2670 | // To: | ||||
2671 | // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B | ||||
2672 | // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements | ||||
2673 | // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements | ||||
2674 | // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements | ||||
2675 | // | ||||
2676 | // Or translate following interleaved store group (factor = 3): | ||||
2677 | // for (i = 0; i < N; i+=3) { | ||||
2678 | // ... do something to R, G, B | ||||
2679 | // Pic[i] = R; // Member of index 0 | ||||
2680 | // Pic[i+1] = G; // Member of index 1 | ||||
2681 | // Pic[i+2] = B; // Member of index 2 | ||||
2682 | // } | ||||
2683 | // To: | ||||
2684 | // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> | ||||
2685 | // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> | ||||
2686 | // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, | ||||
2687 | // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements | ||||
2688 | // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B | ||||
2689 | void InnerLoopVectorizer::vectorizeInterleaveGroup( | ||||
2690 | const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, | ||||
2691 | VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, | ||||
2692 | VPValue *BlockInMask) { | ||||
2693 | Instruction *Instr = Group->getInsertPos(); | ||||
2694 | const DataLayout &DL = Instr->getModule()->getDataLayout(); | ||||
2695 | |||||
2696 | // Prepare for the vector type of the interleaved load/store. | ||||
2697 | Type *ScalarTy = getLoadStoreType(Instr); | ||||
2698 | unsigned InterleaveFactor = Group->getFactor(); | ||||
2699 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | ||||
2700 | auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); | ||||
2701 | |||||
2702 | // Prepare for the new pointers. | ||||
2703 | SmallVector<Value *, 2> AddrParts; | ||||
2704 | unsigned Index = Group->getIndex(Instr); | ||||
2705 | |||||
2706 | // TODO: extend the masked interleaved-group support to reversed access. | ||||
2707 | assert((!BlockInMask || !Group->isReverse()) && | ||||
2708 | "Reversed masked interleave-group not supported."); | ||||
2709 | |||||
2710 | // If the group is reverse, adjust the index to refer to the last vector lane | ||||
2711 | // instead of the first. We adjust the index from the first vector lane, | ||||
2712 | // rather than directly getting the pointer for lane VF - 1, because the | ||||
2713 | // pointer operand of the interleaved access is supposed to be uniform. For | ||||
2714 | // uniform instructions, we're only required to generate a value for the | ||||
2715 | // first vector lane in each unroll iteration. | ||||
2716 | if (Group->isReverse()) | ||||
2717 | Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); | ||||
2718 | |||||
2719 | for (unsigned Part = 0; Part < UF; Part++) { | ||||
2720 | Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); | ||||
2721 | setDebugLocFromInst(Builder, AddrPart); | ||||
2722 | |||||
2723 | // Note that the current instruction could be at any index in the group. The | ||||
2724 | // address needs to be adjusted to the member of index 0. | ||||
2725 | // | ||||
2726 | // E.g. a = A[i+1]; // Member of index 1 (Current instruction) | ||||
2727 | // b = A[i]; // Member of index 0 | ||||
2728 | // The current pointer points to A[i+1]; adjust it to A[i]. | ||||
2729 | // | ||||
2730 | // E.g. A[i+1] = a; // Member of index 1 | ||||
2731 | // A[i] = b; // Member of index 0 | ||||
2732 | // A[i+2] = c; // Member of index 2 (Current instruction) | ||||
2733 | // The current pointer points to A[i+2]; adjust it to A[i]. | ||||
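// For instance, with Index = 1 and an i32 element type, the adjustment below
// is effectively:
//   %adjusted = getelementptr i32, i32* %addr, i32 -1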
2734 | |||||
2735 | bool InBounds = false; | ||||
2736 | if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) | ||||
2737 | InBounds = gep->isInBounds(); | ||||
2738 | AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); | ||||
2739 | cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); | ||||
2740 | |||||
2741 | // Cast to the vector pointer type. | ||||
2742 | unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); | ||||
2743 | Type *PtrTy = VecTy->getPointerTo(AddressSpace); | ||||
2744 | AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); | ||||
2745 | } | ||||
2746 | |||||
2747 | setDebugLocFromInst(Builder, Instr); | ||||
2748 | Value *PoisonVec = PoisonValue::get(VecTy); | ||||
2749 | |||||
2750 | Value *MaskForGaps = nullptr; | ||||
2751 | if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { | ||||
2752 | MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); | ||||
2753 | assert(MaskForGaps && "Mask for Gaps is required but it is null"); | ||||
2754 | } | ||||
2755 | |||||
2756 | // Vectorize the interleaved load group. | ||||
2757 | if (isa<LoadInst>(Instr)) { | ||||
2758 | // For each unroll part, create a wide load for the group. | ||||
2759 | SmallVector<Value *, 2> NewLoads; | ||||
2760 | for (unsigned Part = 0; Part < UF; Part++) { | ||||
2761 | Instruction *NewLoad; | ||||
2762 | if (BlockInMask || MaskForGaps) { | ||||
2763 | assert(useMaskedInterleavedAccesses(*TTI) && | ||||
2764 | "masked interleaved groups are not allowed."); | ||||
2765 | Value *GroupMask = MaskForGaps; | ||||
2766 | if (BlockInMask) { | ||||
2767 | Value *BlockInMaskPart = State.get(BlockInMask, Part); | ||||
2768 | Value *ShuffledMask = Builder.CreateShuffleVector( | ||||
2769 | BlockInMaskPart, | ||||
2770 | createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), | ||||
2771 | "interleaved.mask"); | ||||
2772 | GroupMask = MaskForGaps | ||||
2773 | ? Builder.CreateBinOp(Instruction::And, ShuffledMask, | ||||
2774 | MaskForGaps) | ||||
2775 | : ShuffledMask; | ||||
2776 | } | ||||
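// Illustrative example with assumed values: for a fixed VF of 4 and an
// interleave factor of 2, a block mask <m0,m1,m2,m3> is replicated to
// <m0,m0,m1,m1,m2,m2,m3,m3>, so each scalar iteration's predicate guards
// every member of its interleave group in the wide masked load.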
2777 | NewLoad = | ||||
2778 | Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), | ||||
2779 | GroupMask, PoisonVec, "wide.masked.vec"); | ||||
2780 | } | ||||
2781 | else | ||||
2782 | NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], | ||||
2783 | Group->getAlign(), "wide.vec"); | ||||
2784 | Group->addMetadata(NewLoad); | ||||
2785 | NewLoads.push_back(NewLoad); | ||||
2786 | } | ||||
2787 | |||||
2788 | // For each member in the group, shuffle out the appropriate data from the | ||||
2789 | // wide loads. | ||||
2790 | unsigned J = 0; | ||||
2791 | for (unsigned I = 0; I < InterleaveFactor; ++I) { | ||||
2792 | Instruction *Member = Group->getMember(I); | ||||
2793 | |||||
2794 | // Skip the gaps in the group. | ||||
2795 | if (!Member) | ||||
2796 | continue; | ||||
2797 | |||||
2798 | auto StrideMask = | ||||
2799 | createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); | ||||
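// Illustrative example with assumed values: for an interleave factor of 2 and
// a fixed VF of 4, member I == 1 gets the stride mask <1,3,5,7>, which picks
// that member's elements out of the 8-element wide load.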
2800 | for (unsigned Part = 0; Part < UF; Part++) { | ||||
2801 | Value *StridedVec = Builder.CreateShuffleVector( | ||||
2802 | NewLoads[Part], StrideMask, "strided.vec"); | ||||
2803 | |||||
2804 | // If this member has a different type, cast the result to that type. | ||||
2805 | if (Member->getType() != ScalarTy) { | ||||
2806 | assert(!VF.isScalable() && "VF is assumed to be non scalable."); | ||||
2807 | VectorType *OtherVTy = VectorType::get(Member->getType(), VF); | ||||
2808 | StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); | ||||
2809 | } | ||||
2810 | |||||
2811 | if (Group->isReverse()) | ||||
2812 | StridedVec = reverseVector(StridedVec); | ||||
2813 | |||||
2814 | State.set(VPDefs[J], StridedVec, Part); | ||||
2815 | } | ||||
2816 | ++J; | ||||
2817 | } | ||||
2818 | return; | ||||
2819 | } | ||||
2820 | |||||
2821 | // The subvector type for the current instruction. | ||||
2822 | auto *SubVT = VectorType::get(ScalarTy, VF); | ||||
2823 | |||||
2824 | // Vectorize the interleaved store group. | ||||
2825 | for (unsigned Part = 0; Part < UF; Part++) { | ||||
2826 | // Collect the stored vector from each member. | ||||
2827 | SmallVector<Value *, 4> StoredVecs; | ||||
2828 | for (unsigned i = 0; i < InterleaveFactor; i++) { | ||||
2829 | // An interleaved store group doesn't allow gaps, so each index has a member. | ||||
2830 | assert(Group->getMember(i) && "Fail to get a member from an interleaved store group"); | ||||
2831 | |||||
2832 | Value *StoredVec = State.get(StoredValues[i], Part); | ||||
2833 | |||||
2834 | if (Group->isReverse()) | ||||
2835 | StoredVec = reverseVector(StoredVec); | ||||
2836 | |||||
2837 | // If this member has a different type, cast it to the unified type. | ||||
2838 | |||||
2839 | if (StoredVec->getType() != SubVT) | ||||
2840 | StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); | ||||
2841 | |||||
2842 | StoredVecs.push_back(StoredVec); | ||||
2843 | } | ||||
2844 | |||||
2845 | // Concatenate all vectors into a wide vector. | ||||
2846 | Value *WideVec = concatenateVectors(Builder, StoredVecs); | ||||
2847 | |||||
2848 | // Interleave the elements in the wide vector. | ||||
2849 | Value *IVec = Builder.CreateShuffleVector( | ||||
2850 | WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), | ||||
2851 | "interleaved.vec"); | ||||
2852 | |||||
2853 | Instruction *NewStoreInstr; | ||||
2854 | if (BlockInMask) { | ||||
2855 | Value *BlockInMaskPart = State.get(BlockInMask, Part); | ||||
2856 | Value *ShuffledMask = Builder.CreateShuffleVector( | ||||
2857 | BlockInMaskPart, | ||||
2858 | createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), | ||||
2859 | "interleaved.mask"); | ||||
2860 | NewStoreInstr = Builder.CreateMaskedStore( | ||||
2861 | IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); | ||||
2862 | } | ||||
2863 | else | ||||
2864 | NewStoreInstr = | ||||
2865 | Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); | ||||
2866 | |||||
2867 | Group->addMetadata(NewStoreInstr); | ||||
2868 | } | ||||
2869 | } | ||||
2870 | |||||
2871 | void InnerLoopVectorizer::vectorizeMemoryInstruction( | ||||
2872 | Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, | ||||
2873 | VPValue *StoredValue, VPValue *BlockInMask) { | ||||
2874 | // Attempt to issue a wide load. | ||||
2875 | LoadInst *LI = dyn_cast<LoadInst>(Instr); | ||||
2876 | StoreInst *SI = dyn_cast<StoreInst>(Instr); | ||||
2877 | |||||
2878 | assert((LI || SI) && "Invalid Load/Store instruction"); | ||||
2879 | assert((!SI || StoredValue) && "No stored value provided for widened store"); | ||||
2880 | assert((!LI || !StoredValue) && "Stored value provided for widened load"); | ||||
2881 | |||||
2882 | LoopVectorizationCostModel::InstWidening Decision = | ||||
2883 | Cost->getWideningDecision(Instr, VF); | ||||
2884 | assert((Decision == LoopVectorizationCostModel::CM_Widen || | ||||
2885 | Decision == LoopVectorizationCostModel::CM_Widen_Reverse || | ||||
2886 | Decision == LoopVectorizationCostModel::CM_GatherScatter) && | ||||
2887 | "CM decision is not to widen the memory instruction"); | ||||
2888 | |||||
2889 | Type *ScalarDataTy = getLoadStoreType(Instr); | ||||
2890 | |||||
2891 | auto *DataTy = VectorType::get(ScalarDataTy, VF); | ||||
2892 | const Align Alignment = getLoadStoreAlignment(Instr); | ||||
2893 | |||||
2894 | // Determine if the pointer operand of the access is either consecutive or | ||||
2895 | // reverse consecutive. | ||||
2896 | bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); | ||||
2897 | bool ConsecutiveStride = | ||||
2898 | Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); | ||||
2899 | bool CreateGatherScatter = | ||||
2900 | (Decision == LoopVectorizationCostModel::CM_GatherScatter); | ||||
2901 | |||||
2902 | // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector | ||||
2903 | // gather/scatter. Otherwise Decision should have been to Scalarize. | ||||
2904 | assert((ConsecutiveStride || CreateGatherScatter) && | ||||
2905 | "The instruction should be scalarized"); | ||||
2906 | (void)ConsecutiveStride; | ||||
2907 | |||||
2908 | VectorParts BlockInMaskParts(UF); | ||||
2909 | bool isMaskRequired = BlockInMask; | ||||
2910 | if (isMaskRequired) | ||||
2911 | for (unsigned Part = 0; Part < UF; ++Part) | ||||
2912 | BlockInMaskParts[Part] = State.get(BlockInMask, Part); | ||||
2913 | |||||
2914 | const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { | ||||
2915 | // Calculate the pointer for the specific unroll-part. | ||||
2916 | GetElementPtrInst *PartPtr = nullptr; | ||||
2917 | |||||
2918 | bool InBounds = false; | ||||
2919 | if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) | ||||
2920 | InBounds = gep->isInBounds(); | ||||
2921 | if (Reverse) { | ||||
2922 | // If the address is consecutive but reversed, then the | ||||
2923 | // wide store needs to start at the last vector element. | ||||
2924 | // RunTimeVF = VScale * VF.getKnownMinValue() | ||||
2925 | // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() | ||||
2926 | Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); | ||||
2927 | // NumElt = -Part * RunTimeVF | ||||
2928 | Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); | ||||
2929 | // LastLane = 1 - RunTimeVF | ||||
2930 | Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); | ||||
2931 | PartPtr = | ||||
2932 | cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); | ||||
2933 | PartPtr->setIsInBounds(InBounds); | ||||
2934 | PartPtr = cast<GetElementPtrInst>( | ||||
2935 | Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); | ||||
2936 | PartPtr->setIsInBounds(InBounds); | ||||
2937 | if (isMaskRequired) // Reverse of a null all-one mask is a null mask. | ||||
2938 | BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); | ||||
2939 | } else { | ||||
2940 | Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); | ||||
2941 | PartPtr = cast<GetElementPtrInst>( | ||||
2942 | Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); | ||||
2943 | PartPtr->setIsInBounds(InBounds); | ||||
2944 | } | ||||
2945 | |||||
2946 | unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); | ||||
2947 | return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); | ||||
2948 | }; | ||||
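// Illustrative example with assumed values: for a fixed-width VF of 4,
// RunTimeVF is 4, so Part 0 applies offsets 0 and 1 - 4 = -3 and the wide
// access covers Ptr[-3..0], while Part 1 applies -4 and -3 and covers
// Ptr[-7..-4]; combined with the reversed mask and stored value, this walks
// memory backwards in VF-sized chunks.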
2949 | |||||
2950 | // Handle Stores: | ||||
2951 | if (SI) { | ||||
2952 | setDebugLocFromInst(Builder, SI); | ||||
2953 | |||||
2954 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
2955 | Instruction *NewSI = nullptr; | ||||
2956 | Value *StoredVal = State.get(StoredValue, Part); | ||||
2957 | if (CreateGatherScatter) { | ||||
2958 | Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; | ||||
2959 | Value *VectorGep = State.get(Addr, Part); | ||||
2960 | NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, | ||||
2961 | MaskPart); | ||||
2962 | } else { | ||||
2963 | if (Reverse) { | ||||
2964 | // If we store to reverse consecutive memory locations, then we need | ||||
2965 | // to reverse the order of elements in the stored value. | ||||
2966 | StoredVal = reverseVector(StoredVal); | ||||
2967 | // We don't want to update the value in the map as it might be used in | ||||
2968 | // another expression. So don't call resetVectorValue(StoredVal). | ||||
2969 | } | ||||
2970 | auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); | ||||
2971 | if (isMaskRequired) | ||||
2972 | NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, | ||||
2973 | BlockInMaskParts[Part]); | ||||
2974 | else | ||||
2975 | NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); | ||||
2976 | } | ||||
2977 | addMetadata(NewSI, SI); | ||||
2978 | } | ||||
2979 | return; | ||||
2980 | } | ||||
2981 | |||||
2982 | // Handle loads. | ||||
2983 | assert(LI && "Must have a load instruction"); | ||||
2984 | setDebugLocFromInst(Builder, LI); | ||||
2985 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
2986 | Value *NewLI; | ||||
2987 | if (CreateGatherScatter) { | ||||
2988 | Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; | ||||
2989 | Value *VectorGep = State.get(Addr, Part); | ||||
2990 | NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, | ||||
2991 | nullptr, "wide.masked.gather"); | ||||
2992 | addMetadata(NewLI, LI); | ||||
2993 | } else { | ||||
2994 | auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); | ||||
2995 | if (isMaskRequired) | ||||
2996 | NewLI = Builder.CreateMaskedLoad( | ||||
2997 | VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy), | ||||
2998 | "wide.masked.load"); | ||||
2999 | else | ||||
3000 | NewLI = | ||||
3001 | Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); | ||||
3002 | |||||
3003 | // Add metadata to the load, but setVectorValue to the reverse shuffle. | ||||
3004 | addMetadata(NewLI, LI); | ||||
3005 | if (Reverse) | ||||
3006 | NewLI = reverseVector(NewLI); | ||||
3007 | } | ||||
3008 | |||||
3009 | State.set(Def, NewLI, Part); | ||||
3010 | } | ||||
3011 | } | ||||
3012 | |||||
3013 | void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, | ||||
3014 | VPUser &User, | ||||
3015 | const VPIteration &Instance, | ||||
3016 | bool IfPredicateInstr, | ||||
3017 | VPTransformState &State) { | ||||
3018 | assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); | ||||
3019 | |||||
3020 | // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for | ||||
3021 | // the first lane and part. | ||||
3022 | if (isa<NoAliasScopeDeclInst>(Instr)) | ||||
3023 | if (!Instance.isFirstIteration()) | ||||
3024 | return; | ||||
3025 | |||||
3026 | setDebugLocFromInst(Builder, Instr); | ||||
3027 | |||||
3028 | // Does this instruction return a value? | ||||
3029 | bool IsVoidRetTy = Instr->getType()->isVoidTy(); | ||||
3030 | |||||
3031 | Instruction *Cloned = Instr->clone(); | ||||
3032 | if (!IsVoidRetTy) | ||||
3033 | Cloned->setName(Instr->getName() + ".cloned"); | ||||
3034 | |||||
3035 | State.Builder.SetInsertPoint(Builder.GetInsertBlock(), | ||||
3036 | Builder.GetInsertPoint()); | ||||
3037 | // Replace the operands of the cloned instructions with their scalar | ||||
3038 | // equivalents in the new loop. | ||||
3039 | for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { | ||||
3040 | auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); | ||||
3041 | auto InputInstance = Instance; | ||||
3042 | if (!Operand || !OrigLoop->contains(Operand) || | ||||
3043 | (Cost->isUniformAfterVectorization(Operand, State.VF))) | ||||
3044 | InputInstance.Lane = VPLane::getFirstLane(); | ||||
3045 | auto *NewOp = State.get(User.getOperand(op), InputInstance); | ||||
3046 | Cloned->setOperand(op, NewOp); | ||||
3047 | } | ||||
3048 | addNewMetadata(Cloned, Instr); | ||||
3049 | |||||
3050 | // Place the cloned scalar in the new loop. | ||||
3051 | Builder.Insert(Cloned); | ||||
3052 | |||||
3053 | State.set(Def, Cloned, Instance); | ||||
3054 | |||||
3055 | // If we just cloned a new assumption, add it to the assumption cache. | ||||
3056 | if (auto *II = dyn_cast<AssumeInst>(Cloned)) | ||||
3057 | AC->registerAssumption(II); | ||||
3058 | |||||
3059 | // End if-block. | ||||
3060 | if (IfPredicateInstr) | ||||
3061 | PredicatedInstructions.push_back(Cloned); | ||||
3062 | } | ||||
3063 | |||||
3064 | PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, | ||||
3065 | Value *End, Value *Step, | ||||
3066 | Instruction *DL) { | ||||
3067 | BasicBlock *Header = L->getHeader(); | ||||
3068 | BasicBlock *Latch = L->getLoopLatch(); | ||||
3069 | // As we're just creating this loop, it's possible no latch exists | ||||
3070 | // yet. If so, use the header as this will be a single block loop. | ||||
3071 | if (!Latch) | ||||
3072 | Latch = Header; | ||||
3073 | |||||
3074 | IRBuilder<> Builder(&*Header->getFirstInsertionPt()); | ||||
3075 | Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); | ||||
3076 | setDebugLocFromInst(Builder, OldInst); | ||||
3077 | auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); | ||||
3078 | |||||
3079 | Builder.SetInsertPoint(Latch->getTerminator()); | ||||
3080 | setDebugLocFromInst(Builder, OldInst); | ||||
3081 | |||||
3082 | // Create i+1 and fill the PHINode. | ||||
3083 | // | ||||
3084 | // If the tail is not folded, we know that End - Start >= Step (either | ||||
3085 | // statically or through the minimum iteration checks). We also know that both | ||||
3086 | // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + | ||||
3087 | // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned | ||||
3088 | // overflows and we can mark the induction increment as NUW. | ||||
3089 | Value *Next = | ||||
3090 | Builder.CreateAdd(Induction, Step, "index.next", | ||||
3091 | /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); | ||||
3092 | Induction->addIncoming(Start, L->getLoopPreheader()); | ||||
3093 | Induction->addIncoming(Next, Latch); | ||||
3094 | // Create the compare. | ||||
3095 | Value *ICmp = Builder.CreateICmpEQ(Next, End); | ||||
3096 | Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); | ||||
3097 | |||||
3098 | // Now we have two terminators. Remove the old one from the block. | ||||
3099 | Latch->getTerminator()->eraseFromParent(); | ||||
3100 | |||||
3101 | return Induction; | ||||
3102 | } | ||||
3103 | |||||
3104 | Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { | ||||
3105 | if (TripCount) | ||||
3106 | return TripCount; | ||||
3107 | |||||
3108 | assert(L && "Create Trip Count for null loop."); | ||||
3109 | IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); | ||||
3110 | // Find the loop boundaries. | ||||
3111 | ScalarEvolution *SE = PSE.getSE(); | ||||
3112 | const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); | ||||
3113 | assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && | ||||
3114 | "Invalid loop count"); | ||||
3115 | |||||
3116 | Type *IdxTy = Legal->getWidestInductionType(); | ||||
3117 | assert(IdxTy && "No type for induction"); | ||||
3118 | |||||
3119 | // The exit count might have the type of i64 while the phi is i32. This can | ||||
3120 | // happen if we have an induction variable that is sign extended before the | ||||
3121 | // compare. The only way that we get a backedge taken count is that the | ||||
3122 | // induction variable was signed and as such will not overflow. In such a case | ||||
3123 | // truncation is legal. | ||||
3124 | if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > | ||||
3125 | IdxTy->getPrimitiveSizeInBits()) | ||||
3126 | BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); | ||||
3127 | BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); | ||||
3128 | |||||
3129 | // Get the total trip count from the count by adding 1. | ||||
3130 | const SCEV *ExitCount = SE->getAddExpr( | ||||
3131 | BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); | ||||
3132 | |||||
3133 | const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); | ||||
3134 | |||||
3135 | // Expand the trip count and place the new instructions in the preheader. | ||||
3136 | // Notice that the pre-header does not change, only the loop body. | ||||
3137 | SCEVExpander Exp(*SE, DL, "induction"); | ||||
3138 | |||||
3139 | // Count holds the overall loop count (N). | ||||
3140 | TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), | ||||
3141 | L->getLoopPreheader()->getTerminator()); | ||||
3142 | |||||
3143 | if (TripCount->getType()->isPointerTy()) | ||||
3144 | TripCount = | ||||
3145 | CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", | ||||
3146 | L->getLoopPreheader()->getTerminator()); | ||||
3147 | |||||
3148 | return TripCount; | ||||
3149 | } | ||||
3150 | |||||
3151 | Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { | ||||
3152 | if (VectorTripCount) | ||||
3153 | return VectorTripCount; | ||||
3154 | |||||
3155 | Value *TC = getOrCreateTripCount(L); | ||||
3156 | IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); | ||||
3157 | |||||
3158 | Type *Ty = TC->getType(); | ||||
3159 | // This is where we can make the step a runtime constant. | ||||
3160 | Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); | ||||
3161 | |||||
3162 | // If the tail is to be folded by masking, round the number of iterations N | ||||
3163 | // up to a multiple of Step instead of rounding down. This is done by first | ||||
3164 | // adding Step-1 and then rounding down. Note that it's ok if this addition | ||||
3165 | // overflows: the vector induction variable will eventually wrap to zero given | ||||
3166 | // that it starts at zero and its Step is a power of two; the loop will then | ||||
3167 | // exit, with the last early-exit vector comparison also producing all-true. | ||||
3168 | if (Cost->foldTailByMasking()) { | ||||
3169 | assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && | ||||
3170 | "VF*UF must be a power of 2 when folding tail by masking"); | ||||
3171 | assert(!VF.isScalable() && | ||||
3172 | "Tail folding not yet supported for scalable vectors"); | ||||
3173 | TC = Builder.CreateAdd( | ||||
3174 | TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); | ||||
3175 | } | ||||
3176 | |||||
3177 | // Now we need to generate the expression for the part of the loop that the | ||||
3178 | // vectorized body will execute. This is equal to N - (N % Step) if scalar | ||||
3179 | // iterations are not required for correctness, or N - Step, otherwise. Step | ||||
3180 | // is equal to the vectorization factor (number of SIMD elements) times the | ||||
3181 | // unroll factor (number of SIMD instructions). | ||||
3182 | Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); | ||||
3183 | |||||
3184 | // There are two cases where we need to ensure (at least) the last iteration | ||||
3185 | // runs in the scalar remainder loop. Thus, if the step evenly divides | ||||
3186 | // the trip count, we set the remainder to be equal to the step. If the step | ||||
3187 | // does not evenly divide the trip count, no adjustment is necessary since | ||||
3188 | // there will already be scalar iterations. Note that the minimum iterations | ||||
3189 | // check ensures that N >= Step. The cases are: | ||||
3190 | // 1) If there is a non-reversed interleaved group that may speculatively | ||||
3191 | // access memory out-of-bounds. | ||||
3192 | // 2) If any instruction may follow a conditionally taken exit. That is, if | ||||
3193 | // the loop contains multiple exiting blocks, or a single exiting block | ||||
3194 | // which is not the latch. | ||||
3195 | if (VF.isVector() && Cost->requiresScalarEpilogue()) { | ||||
3196 | auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); | ||||
3197 | R = Builder.CreateSelect(IsZero, Step, R); | ||||
3198 | } | ||||
3199 | |||||
3200 | VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); | ||||
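// Worked example with assumed values: with VF * UF = 8 and a trip count of
// 13, folding the tail gives TC = 13 + 7 = 20, R = 20 % 8 = 4 and
// n.vec = 16, i.e. two vector iterations whose masked lanes cover all 13
// original iterations. Without tail folding, R = 13 % 8 = 5 and n.vec = 8;
// and if the trip count were 16 with a scalar epilogue required, R would be
// bumped from 0 to 8 so n.vec = 8 leaves the final 8 iterations to the
// scalar loop.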
3201 | |||||
3202 | return VectorTripCount; | ||||
3203 | } | ||||
3204 | |||||
3205 | Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, | ||||
3206 | const DataLayout &DL) { | ||||
3207 | // Verify that V is a vector type with same number of elements as DstVTy. | ||||
3208 | auto *DstFVTy = cast<FixedVectorType>(DstVTy); | ||||
3209 | unsigned VF = DstFVTy->getNumElements(); | ||||
3210 | auto *SrcVecTy = cast<FixedVectorType>(V->getType()); | ||||
3211 | assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); | ||||
3212 | Type *SrcElemTy = SrcVecTy->getElementType(); | ||||
3213 | Type *DstElemTy = DstFVTy->getElementType(); | ||||
3214 | assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && | ||||
3215 | "Vector elements must have same size"); | ||||
3216 | |||||
3217 | // Do a direct cast if element types are castable. | ||||
3218 | if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { | ||||
3219 | return Builder.CreateBitOrPointerCast(V, DstFVTy); | ||||
3220 | } | ||||
3221 | // V cannot be directly cast to the desired vector type. | ||||
3222 | // May happen when V is a floating point vector but DstVTy is a vector of | ||||
3223 | // pointers or vice-versa. Handle this using a two-step bitcast using an | ||||
3224 | // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. | ||||
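// Illustrative example with assumed types: on a target with 64-bit pointers,
// casting <4 x double> to <4 x i8*> goes through the intermediate <4 x i64>,
// since a direct bitcast between floating-point and pointer elements is not
// legal.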
3225 | assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && | ||||
3226 | "Only one type should be a pointer type"); | ||||
3227 | assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && | ||||
3228 | "Only one type should be a floating point type"); | ||||
3229 | Type *IntTy = | ||||
3230 | IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); | ||||
3231 | auto *VecIntTy = FixedVectorType::get(IntTy, VF); | ||||
3232 | Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); | ||||
3233 | return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); | ||||
3234 | } | ||||
3235 | |||||
3236 | void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, | ||||
3237 | BasicBlock *Bypass) { | ||||
3238 | Value *Count = getOrCreateTripCount(L); | ||||
3239 | // Reuse existing vector loop preheader for TC checks. | ||||
3240 | // Note that a new preheader block is generated for the vector loop. | ||||
3241 | BasicBlock *const TCCheckBlock = LoopVectorPreHeader; | ||||
3242 | IRBuilder<> Builder(TCCheckBlock->getTerminator()); | ||||
3243 | |||||
3244 | // Generate code to check if the loop's trip count is less than VF * UF, or | ||||
3245 | // equal to it in case a scalar epilogue is required; this implies that the | ||||
3246 | // vector trip count is zero. This check also covers the case where adding one | ||||
3247 | // to the backedge-taken count overflowed leading to an incorrect trip count | ||||
3248 | // of zero. In this case we will also jump to the scalar loop. | ||||
3249 | auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE | ||||
3250 | : ICmpInst::ICMP_ULT; | ||||
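// Illustrative example with assumed values: with VF * UF = 8 and a trip count
// of exactly 8, ICMP_ULT (8 < 8) is false and the vector loop runs all 8
// iterations, whereas ICMP_ULE (8 <= 8) is true when a scalar epilogue is
// required, because the vector trip count would then be 0 and only the scalar
// loop can execute the remaining iterations.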
3251 | |||||
3252 | // If tail is to be folded, vector loop takes care of all iterations. | ||||
3253 | Value *CheckMinIters = Builder.getFalse(); | ||||
3254 | if (!Cost->foldTailByMasking()) { | ||||
3255 | Value *Step = | ||||
3256 | createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); | ||||
3257 | CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); | ||||
3258 | } | ||||
3259 | // Create new preheader for vector loop. | ||||
3260 | LoopVectorPreHeader = | ||||
3261 | SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, | ||||
3262 | "vector.ph"); | ||||
3263 | |||||
3264 | assert(DT->properlyDominates(DT->getNode(TCCheckBlock), | ||||
3265 | DT->getNode(Bypass)->getIDom()) && | ||||
3266 | "TC check is expected to dominate Bypass"); | ||||
3267 | |||||
3268 | // Update dominator for Bypass & LoopExit. | ||||
3269 | DT->changeImmediateDominator(Bypass, TCCheckBlock); | ||||
3270 | DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); | ||||
3271 | |||||
3272 | ReplaceInstWithInst( | ||||
3273 | TCCheckBlock->getTerminator(), | ||||
3274 | BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); | ||||
3275 | LoopBypassBlocks.push_back(TCCheckBlock); | ||||
3276 | } | ||||
3277 | |||||
3278 | BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { | ||||
3279 | |||||
3280 | BasicBlock *const SCEVCheckBlock = | ||||
3281 | RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); | ||||
3282 | if (!SCEVCheckBlock) | ||||
3283 | return nullptr; | ||||
3284 | |||||
3285 | assert(!(SCEVCheckBlock->getParent()->hasOptSize() || | ||||
3286 | (OptForSizeBasedOnProfile && | ||||
3287 | Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && | ||||
3288 | "Cannot SCEV check stride or overflow when optimizing for size"); | ||||
3289 | |||||
3290 | |||||
3291 | // Update the dominator only if this is the first RT check. | ||||
3292 | if (LoopBypassBlocks.empty()) { | ||||
3293 | DT->changeImmediateDominator(Bypass, SCEVCheckBlock); | ||||
3294 | DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); | ||||
3295 | } | ||||
3296 | |||||
3297 | LoopBypassBlocks.push_back(SCEVCheckBlock); | ||||
3298 | AddedSafetyChecks = true; | ||||
3299 | return SCEVCheckBlock; | ||||
3300 | } | ||||
3301 | |||||
3302 | BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, | ||||
3303 | BasicBlock *Bypass) { | ||||
3304 | // VPlan-native path does not do any analysis for runtime checks currently. | ||||
3305 | if (EnableVPlanNativePath) | ||||
3306 | return nullptr; | ||||
3307 | |||||
3308 | BasicBlock *const MemCheckBlock = | ||||
3309 | RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); | ||||
3310 | |||||
3311 | // Check if we generated code that checks at runtime whether arrays overlap. We put | ||||
3312 | // the checks into a separate block to make the more common case of few | ||||
3313 | // elements faster. | ||||
3314 | if (!MemCheckBlock) | ||||
3315 | return nullptr; | ||||
3316 | |||||
3317 | if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { | ||||
3318 | assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && | ||||
3319 | "Cannot emit memory checks when optimizing for size, unless forced " | ||||
3320 | "to vectorize."); | ||||
3321 | ORE->emit([&]() { | ||||
3322 | return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", | ||||
3323 | L->getStartLoc(), L->getHeader()) | ||||
3324 | << "Code-size may be reduced by not forcing " | ||||
3325 | "vectorization, or by source-code modifications " | ||||
3326 | "eliminating the need for runtime checks " | ||||
3327 | "(e.g., adding 'restrict')."; | ||||
3328 | }); | ||||
3329 | } | ||||
3330 | |||||
3331 | LoopBypassBlocks.push_back(MemCheckBlock); | ||||
3332 | |||||
3333 | AddedSafetyChecks = true; | ||||
3334 | |||||
3335 | // We currently don't use LoopVersioning for the actual loop cloning but we | ||||
3336 | // still use it to add the noalias metadata. | ||||
3337 | LVer = std::make_unique<LoopVersioning>( | ||||
3338 | *Legal->getLAI(), | ||||
3339 | Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, | ||||
3340 | DT, PSE.getSE()); | ||||
3341 | LVer->prepareNoAliasMetadata(); | ||||
3342 | return MemCheckBlock; | ||||
3343 | } | ||||
3344 | |||||
3345 | Value *InnerLoopVectorizer::emitTransformedIndex( | ||||
3346 | IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, | ||||
3347 | const InductionDescriptor &ID) const { | ||||
3348 | |||||
3349 | SCEVExpander Exp(*SE, DL, "induction"); | ||||
3350 | auto Step = ID.getStep(); | ||||
3351 | auto StartValue = ID.getStartValue(); | ||||
3352 | assert(Index->getType()->getScalarType() == Step->getType() && | ||||
3353 | "Index scalar type does not match StepValue type"); | ||||
3354 | |||||
3355 | // Note: the IR at this point is broken. We cannot use SE to create any new | ||||
3356 | // SCEV and then expand it, hoping that SCEV's simplification will give us | ||||
3357 | // more optimal code. Unfortunately, attempting to do so on invalid IR may | ||||
3358 | // lead to various SCEV crashes. So all we can do is use the builder and rely | ||||
3359 | // on InstCombine for future simplifications. Here we handle some trivial | ||||
3360 | // cases only. | ||||
3361 | auto CreateAdd = [&B](Value *X, Value *Y) { | ||||
3362 | assert(X->getType() == Y->getType() && "Types don't match!"); | ||||
3363 | if (auto *CX = dyn_cast<ConstantInt>(X)) | ||||
3364 | if (CX->isZero()) | ||||
3365 | return Y; | ||||
3366 | if (auto *CY = dyn_cast<ConstantInt>(Y)) | ||||
3367 | if (CY->isZero()) | ||||
3368 | return X; | ||||
3369 | return B.CreateAdd(X, Y); | ||||
3370 | }; | ||||
3371 | |||||
3372 | // We allow X to be a vector type, in which case Y will potentially be | ||||
3373 | // splatted into a vector with the same element count. | ||||
3374 | auto CreateMul = [&B](Value *X, Value *Y) { | ||||
3375 | assert(X->getType()->getScalarType() == Y->getType() && | ||||
3376 | "Types don't match!"); | ||||
3377 | if (auto *CX = dyn_cast<ConstantInt>(X)) | ||||
3378 | if (CX->isOne()) | ||||
3379 | return Y; | ||||
3380 | if (auto *CY = dyn_cast<ConstantInt>(Y)) | ||||
3381 | if (CY->isOne()) | ||||
3382 | return X; | ||||
3383 | VectorType *XVTy = dyn_cast<VectorType>(X->getType()); | ||||
3384 | if (XVTy && !isa<VectorType>(Y->getType())) | ||||
3385 | Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); | ||||
3386 | return B.CreateMul(X, Y); | ||||
3387 | }; | ||||
3388 | |||||
3389 | // Get a suitable insert point for SCEV expansion. For blocks in the vector | ||||
3390 | // loop, choose the end of the vector loop header (=LoopVectorBody), because | ||||
3391 | // the DomTree is not kept up-to-date for additional blocks generated in the | ||||
3392 | // vector loop. By using the header as insertion point, we guarantee that the | ||||
3393 | // expanded instructions dominate all their uses. | ||||
3394 | auto GetInsertPoint = [this, &B]() { | ||||
3395 | BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); | ||||
3396 | if (InsertBB != LoopVectorBody && | ||||
3397 | LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) | ||||
3398 | return LoopVectorBody->getTerminator(); | ||||
3399 | return &*B.GetInsertPoint(); | ||||
3400 | }; | ||||
3401 | |||||
3402 | switch (ID.getKind()) { | ||||
3403 | case InductionDescriptor::IK_IntInduction: { | ||||
3404 | assert(!isa<VectorType>(Index->getType()) && | ||||
3405 | "Vector indices not supported for integer inductions yet"); | ||||
3406 | assert(Index->getType() == StartValue->getType() && | ||||
3407 | "Index type does not match StartValue type"); | ||||
3408 | if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) | ||||
3409 | return B.CreateSub(StartValue, Index); | ||||
3410 | auto *Offset = CreateMul( | ||||
3411 | Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); | ||||
3412 | return CreateAdd(StartValue, Offset); | ||||
3413 | } | ||||
3414 | case InductionDescriptor::IK_PtrInduction: { | ||||
3415 | assert(isa<SCEVConstant>(Step) && | ||||
3416 | "Expected constant step for pointer induction"); | ||||
3417 | return B.CreateGEP( | ||||
3418 | StartValue->getType()->getPointerElementType(), StartValue, | ||||
3419 | CreateMul(Index, | ||||
3420 | Exp.expandCodeFor(Step, Index->getType()->getScalarType(), | ||||
3421 | GetInsertPoint()))); | ||||
3422 | } | ||||
3423 | case InductionDescriptor::IK_FpInduction: { | ||||
3424 | assert(!isa<VectorType>(Index->getType()) && | ||||
3425 | "Vector indices not supported for FP inductions yet"); | ||||
3426 | assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); | ||||
3427 | auto InductionBinOp = ID.getInductionBinOp(); | ||||
3428 | assert(InductionBinOp && | ||||
3429 | (InductionBinOp->getOpcode() == Instruction::FAdd || | ||||
3430 | InductionBinOp->getOpcode() == Instruction::FSub) && | ||||
3431 | "Original bin op should be defined for FP induction"); | ||||
3432 | |||||
3433 | Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); | ||||
3434 | Value *MulExp = B.CreateFMul(StepValue, Index); | ||||
3435 | return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, | ||||
3436 | "induction"); | ||||
3437 | } | ||||
3438 | case InductionDescriptor::IK_NoInduction: | ||||
3439 | return nullptr; | ||||
3440 | } | ||||
3441 | llvm_unreachable("invalid enum"); | ||||
3442 | } | ||||
3443 | |||||
3444 | Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { | ||||
3445 | LoopScalarBody = OrigLoop->getHeader(); | ||||
3446 | LoopVectorPreHeader = OrigLoop->getLoopPreheader(); | ||||
3447 | LoopExitBlock = OrigLoop->getUniqueExitBlock(); | ||||
3448 | assert(LoopExitBlock && "Must have an exit block"); | ||||
3449 | assert(LoopVectorPreHeader && "Invalid loop structure"); | ||||
3450 | |||||
3451 | LoopMiddleBlock = | ||||
3452 | SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, | ||||
3453 | LI, nullptr, Twine(Prefix) + "middle.block"); | ||||
3454 | LoopScalarPreHeader = | ||||
3455 | SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, | ||||
3456 | nullptr, Twine(Prefix) + "scalar.ph"); | ||||
3457 | |||||
3458 | // Set up branch from middle block to the exit and scalar preheader blocks. | ||||
3459 | // completeLoopSkeleton will update the condition to use an iteration check, | ||||
3460 | // if required to decide whether to execute the remainder. | ||||
3461 | BranchInst *BrInst = | ||||
3462 | BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue()); | ||||
3463 | auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); | ||||
3464 | BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); | ||||
3465 | ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); | ||||
3466 | |||||
3467 | // We intentionally don't let SplitBlock update LoopInfo since | ||||
3468 | // LoopVectorBody should belong to a different loop than LoopVectorPreHeader. | ||||
3469 | // LoopVectorBody is explicitly added in the correct place a few lines later. | ||||
3470 | LoopVectorBody = | ||||
3471 | SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, | ||||
3472 | nullptr, nullptr, Twine(Prefix) + "vector.body"); | ||||
3473 | |||||
3474 | // Update dominator for loop exit. | ||||
3475 | DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); | ||||
3476 | |||||
3477 | // Create and register the new vector loop. | ||||
3478 | Loop *Lp = LI->AllocateLoop(); | ||||
3479 | Loop *ParentLoop = OrigLoop->getParentLoop(); | ||||
3480 | |||||
3481 | // Insert the new loop into the loop nest and register the new basic blocks | ||||
3482 | // before calling any utilities such as SCEV that require valid LoopInfo. | ||||
3483 | if (ParentLoop) { | ||||
3484 | ParentLoop->addChildLoop(Lp); | ||||
3485 | } else { | ||||
3486 | LI->addTopLevelLoop(Lp); | ||||
3487 | } | ||||
3488 | Lp->addBasicBlockToLoop(LoopVectorBody, *LI); | ||||
3489 | return Lp; | ||||
3490 | } | ||||
3491 | |||||
3492 | void InnerLoopVectorizer::createInductionResumeValues( | ||||
3493 | Loop *L, Value *VectorTripCount, | ||||
3494 | std::pair<BasicBlock *, Value *> AdditionalBypass) { | ||||
3495 | assert(VectorTripCount && L && "Expected valid arguments"); | ||||
3496 | assert(((AdditionalBypass.first && AdditionalBypass.second) || | ||||
3497 | (!AdditionalBypass.first && !AdditionalBypass.second)) && | ||||
3498 | "Inconsistent information about additional bypass."); | ||||
3499 | // We are going to resume the execution of the scalar loop. | ||||
3500 | // Go over all of the induction variables that we found and fix the | ||||
3501 | // PHIs that are left in the scalar version of the loop. | ||||
3502 | // The starting values of PHI nodes depend on the counter of the last | ||||
3503 | // iteration in the vectorized loop. | ||||
3504 | // If we come from a bypass edge then we need to start from the original | ||||
3505 | // start value. | ||||
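// For illustration (a sketch; assume a non-primary integer induction starting
// at %start with a constant step of 4 and a vector trip count %vtc): the
// resume value is %start + 4 * %vtc when the scalar loop is entered from the
// middle block, and %start when it is entered through a bypass edge.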
3506 | for (auto &InductionEntry : Legal->getInductionVars()) { | ||||
3507 | PHINode *OrigPhi = InductionEntry.first; | ||||
3508 | InductionDescriptor II = InductionEntry.second; | ||||
3509 | |||||
3510 | // Create phi nodes to merge from the backedge-taken check block. | ||||
3511 | PHINode *BCResumeVal = | ||||
3512 | PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", | ||||
3513 | LoopScalarPreHeader->getTerminator()); | ||||
3514 | // Copy original phi DL over to the new one. | ||||
3515 | BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); | ||||
3516 | Value *&EndValue = IVEndValues[OrigPhi]; | ||||
3517 | Value *EndValueFromAdditionalBypass = AdditionalBypass.second; | ||||
3518 | if (OrigPhi == OldInduction) { | ||||
3519 | // We know what the end value is. | ||||
3520 | EndValue = VectorTripCount; | ||||
3521 | } else { | ||||
3522 | IRBuilder<> B(L->getLoopPreheader()->getTerminator()); | ||||
3523 | |||||
3524 | // Fast-math-flags propagate from the original induction instruction. | ||||
3525 | if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) | ||||
3526 | B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); | ||||
3527 | |||||
3528 | Type *StepType = II.getStep()->getType(); | ||||
3529 | Instruction::CastOps CastOp = | ||||
3530 | CastInst::getCastOpcode(VectorTripCount, true, StepType, true); | ||||
3531 | Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); | ||||
3532 | const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); | ||||
3533 | EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); | ||||
3534 | EndValue->setName("ind.end"); | ||||
3535 | |||||
3536 | // Compute the end value for the additional bypass (if applicable). | ||||
3537 | if (AdditionalBypass.first) { | ||||
3538 | B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); | ||||
3539 | CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, | ||||
3540 | StepType, true); | ||||
3541 | CRD = | ||||
3542 | B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); | ||||
3543 | EndValueFromAdditionalBypass = | ||||
3544 | emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); | ||||
3545 | EndValueFromAdditionalBypass->setName("ind.end"); | ||||
3546 | } | ||||
3547 | } | ||||
3548 | // The new PHI merges the original incoming value, in case of a bypass, | ||||
3549 | // or the value at the end of the vectorized loop. | ||||
3550 | BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); | ||||
3551 | |||||
3552 | // Fix the scalar body counter (PHI node). | ||||
3553 | // The old induction's phi node in the scalar body needs the truncated | ||||
3554 | // value. | ||||
3555 | for (BasicBlock *BB : LoopBypassBlocks) | ||||
3556 | BCResumeVal->addIncoming(II.getStartValue(), BB); | ||||
3557 | |||||
3558 | if (AdditionalBypass.first) | ||||
3559 | BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, | ||||
3560 | EndValueFromAdditionalBypass); | ||||
3561 | |||||
3562 | OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); | ||||
3563 | } | ||||
3564 | } | ||||
3565 | |||||
3566 | BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, | ||||
3567 | MDNode *OrigLoopID) { | ||||
3568 | assert(L && "Expected valid loop."); | ||||
3569 | |||||
3570 | // The trip counts should be cached by now. | ||||
3571 | Value *Count = getOrCreateTripCount(L); | ||||
3572 | Value *VectorTripCount = getOrCreateVectorTripCount(L); | ||||
3573 | |||||
3574 | auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); | ||||
3575 | |||||
3576 | // Add a check in the middle block to see if we have completed | ||||
3577 | // all of the iterations in the first vector loop. | ||||
3578 | // If (N - N%VF) == N, then we *don't* need to run the remainder. | ||||
3579 | // If tail is to be folded, we know we don't need to run the remainder. | ||||
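// For example, with VF * UF == 4: N == 20 gives 20 - 20 % 4 == 20, so the
// remainder is skipped, while N == 22 gives 22 - 22 % 4 == 20 != 22, so the
// scalar loop runs the last two iterations.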
3580 | if (!Cost->foldTailByMasking()) { | ||||
3581 | Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, | ||||
3582 | Count, VectorTripCount, "cmp.n", | ||||
3583 | LoopMiddleBlock->getTerminator()); | ||||
3584 | |||||
3585 | // Here we use the same DebugLoc as the scalar loop latch terminator instead | ||||
3586 | // of the corresponding compare because they may have ended up with | ||||
3587 | // different line numbers and we want to avoid awkward line stepping while | ||||
3588 | // debugging. E.g. if the compare has a line number inside the loop. | ||||
3589 | CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); | ||||
3590 | cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); | ||||
3591 | } | ||||
3592 | |||||
3593 | // Get ready to start creating new instructions into the vectorized body. | ||||
3594 | assert(LoopVectorPreHeader == L->getLoopPreheader() && | ||||
3595 | "Inconsistent vector loop preheader"); | ||||
3596 | Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); | ||||
3597 | |||||
3598 | Optional<MDNode *> VectorizedLoopID = | ||||
3599 | makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, | ||||
3600 | LLVMLoopVectorizeFollowupVectorized}); | ||||
3601 | if (VectorizedLoopID.hasValue()) { | ||||
3602 | L->setLoopID(VectorizedLoopID.getValue()); | ||||
3603 | |||||
3604 | // Do not setAlreadyVectorized if loop attributes have been defined | ||||
3605 | // explicitly. | ||||
3606 | return LoopVectorPreHeader; | ||||
3607 | } | ||||
3608 | |||||
3609 | // Keep all loop hints from the original loop on the vector loop (we'll | ||||
3610 | // replace the vectorizer-specific hints below). | ||||
3611 | if (MDNode *LID = OrigLoop->getLoopID()) | ||||
3612 | L->setLoopID(LID); | ||||
3613 | |||||
3614 | LoopVectorizeHints Hints(L, true, *ORE); | ||||
3615 | Hints.setAlreadyVectorized(); | ||||
3616 | |||||
3617 | #ifdef EXPENSIVE_CHECKS | ||||
3618 | assert(DT->verify(DominatorTree::VerificationLevel::Fast)); | ||||
3619 | LI->verify(*DT); | ||||
3620 | #endif | ||||
3621 | |||||
3622 | return LoopVectorPreHeader; | ||||
3623 | } | ||||
3624 | |||||
3625 | BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { | ||||
3626 | /* | ||||
3627 | In this function we generate a new loop. The new loop will contain | ||||
3628 | the vectorized instructions while the old loop will continue to run the | ||||
3629 | scalar remainder. | ||||
3630 | |||||
3631 | [ ] <-- loop iteration number check. | ||||
3632 | / | | ||||
3633 | / v | ||||
3634 | | [ ] <-- vector loop bypass (may consist of multiple blocks). | ||||
3635 | | / | | ||||
3636 | | / v | ||||
3637 | || [ ] <-- vector pre header. | ||||
3638 | |/ | | ||||
3639 | | v | ||||
3640 | | [ ] \ | ||||
3641 | | [ ]_| <-- vector loop. | ||||
3642 | | | | ||||
3643 | | v | ||||
3644 | | -[ ] <--- middle-block. | ||||
3645 | | / | | ||||
3646 | | / v | ||||
3647 | -|- >[ ] <--- new preheader. | ||||
3648 | | | | ||||
3649 | | v | ||||
3650 | | [ ] \ | ||||
3651 | | [ ]_| <-- old scalar loop to handle remainder. | ||||
3652 | \ | | ||||
3653 | \ v | ||||
3654 | >[ ] <-- exit block. | ||||
3655 | ... | ||||
3656 | */ | ||||
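// As a rough sketch (illustrative only; VF = 4, UF = 1, runtime checks
// omitted), a loop such as
//   for (i = 0; i < n; ++i) a[i] = b[i] + 1;
// is laid out roughly as
//   if (n < 4) goto scalar.ph;            // loop iteration number check
//   n.vec = n - n % 4;
//   for (i = 0; i < n.vec; i += 4)        // vector loop
//     a[i..i+3] = b[i..i+3] + 1;
//   if (i == n) goto exit;                // middle block
// scalar.ph:
//   for (; i < n; ++i) a[i] = b[i] + 1;   // old scalar loop (remainder)
// exit: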
3657 | |||||
3658 | // Get the metadata of the original loop before it gets modified. | ||||
3659 | MDNode *OrigLoopID = OrigLoop->getLoopID(); | ||||
3660 | |||||
3661 | // Workaround! Compute the trip count of the original loop and cache it | ||||
3662 | // before we start modifying the CFG. This code has a systemic problem | ||||
3663 | // wherein it tries to run analysis over partially constructed IR; this is | ||||
3664 | // wrong, and not simply for SCEV. The trip count of the original loop | ||||
3665 | // simply happens to be prone to hitting this in practice. In theory, we | ||||
3666 | // can hit the same issue for any SCEV, or ValueTracking query done during | ||||
3667 | // mutation. See PR49900. | ||||
3668 | getOrCreateTripCount(OrigLoop); | ||||
3669 | |||||
3670 | // Create an empty vector loop, and prepare basic blocks for the runtime | ||||
3671 | // checks. | ||||
3672 | Loop *Lp = createVectorLoopSkeleton(""); | ||||
3673 | |||||
3674 | // Now, compare the new count to zero. If it is zero skip the vector loop and | ||||
3675 | // jump to the scalar loop. This check also covers the case where the | ||||
3676 | // backedge-taken count is uint##_max: adding one to it will overflow leading | ||||
3677 | // to an incorrect trip count of zero. In this (rare) case we will also jump | ||||
3678 | // to the scalar loop. | ||||
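// For example (illustrative): with an i8 induction, a backedge-taken count of
// 255 yields a trip count of 255 + 1, which wraps to 0, and the check emitted
// below sends execution to the scalar loop instead of the vector loop.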
3679 | emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); | ||||
3680 | |||||
3681 | // Generate the code to check any assumptions that we've made for SCEV | ||||
3682 | // expressions. | ||||
3683 | emitSCEVChecks(Lp, LoopScalarPreHeader); | ||||
3684 | |||||
3685 | // Generate the code that checks in runtime if arrays overlap. We put the | ||||
3686 | // checks into a separate block to make the more common case of few elements | ||||
3687 | // faster. | ||||
3688 | emitMemRuntimeChecks(Lp, LoopScalarPreHeader); | ||||
3689 | |||||
3690 | // Some loops have a single integer induction variable, while other loops | ||||
3691 | // don't. One example is C++ iterators that often have multiple pointer | ||||
3692 | // induction variables. In the code below we also support a case where we | ||||
3693 | // don't have a single induction variable. | ||||
3694 | // | ||||
3695 | // We try as hard as possible to obtain an induction variable from the | ||||
3696 | // original loop. However, if we don't find one that: | ||||
3697 | // - is an integer | ||||
3698 | // - counts from zero, stepping by one | ||||
3699 | // - is the size of the widest induction variable type | ||||
3700 | // then we create a new one. | ||||
3701 | OldInduction = Legal->getPrimaryInduction(); | ||||
3702 | Type *IdxTy = Legal->getWidestInductionType(); | ||||
3703 | Value *StartIdx = ConstantInt::get(IdxTy, 0); | ||||
3704 | // The loop step is equal to the vectorization factor (num of SIMD elements) | ||||
3705 | // times the unroll factor (num of SIMD instructions). | ||||
3706 | Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); | ||||
3707 | Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); | ||||
3708 | Value *CountRoundDown = getOrCreateVectorTripCount(Lp); | ||||
3709 | Induction = | ||||
3710 | createInductionVariable(Lp, StartIdx, CountRoundDown, Step, | ||||
3711 | getDebugLocFromInstOrOperands(OldInduction)); | ||||
3712 | |||||
3713 | // Emit phis for the new starting index of the scalar loop. | ||||
3714 | createInductionResumeValues(Lp, CountRoundDown); | ||||
3715 | |||||
3716 | return completeLoopSkeleton(Lp, OrigLoopID); | ||||
3717 | } | ||||
3718 | |||||
3719 | // Fix up external users of the induction variable. At this point, we are | ||||
3720 | // in LCSSA form, with all external PHIs that use the IV having one input value, | ||||
3721 | // coming from the remainder loop. We need those PHIs to also have a correct | ||||
3722 | // value for the IV when arriving directly from the middle block. | ||||
3723 | void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, | ||||
3724 | const InductionDescriptor &II, | ||||
3725 | Value *CountRoundDown, Value *EndValue, | ||||
3726 | BasicBlock *MiddleBlock) { | ||||
3727 | // There are two kinds of external IV usages - those that use the value | ||||
3728 | // computed in the last iteration (the PHI) and those that use the penultimate | ||||
3729 | // value (the value that feeds into the phi from the loop latch). | ||||
3730 | // We allow both, but they, obviously, have different values. | ||||
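// For illustration (shorthand IR, assuming a unit-step integer IV):
//   loop:
//     %iv      = phi [ 0, %preheader ], [ %iv.next, %loop ]
//     %iv.next = add %iv, 1
//     ...
//   exit:
//     %a = phi [ %iv.next, %loop ]  ; uses the last value        -> EndValue
//     %b = phi [ %iv, %loop ]       ; uses the penultimate value -> EndValue - Step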
3731 | |||||
3732 | assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); | ||||
3733 | |||||
3734 | DenseMap<Value *, Value *> MissingVals; | ||||
3735 | |||||
3736 | // An external user of the last iteration's value should see the value that | ||||
3737 | // the remainder loop uses to initialize its own IV. | ||||
3738 | Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); | ||||
3739 | for (User *U : PostInc->users()) { | ||||
3740 | Instruction *UI = cast<Instruction>(U); | ||||
3741 | if (!OrigLoop->contains(UI)) { | ||||
3742 | assert(isa<PHINode>(UI) && "Expected LCSSA form"); | ||||
3743 | MissingVals[UI] = EndValue; | ||||
3744 | } | ||||
3745 | } | ||||
3746 | |||||
3747 | // An external user of the penultimate value needs to see EndValue - Step. | ||||
3748 | // The simplest way to get this is to recompute it from the constituent SCEVs, | ||||
3749 | // that is Start + (Step * (CRD - 1)). | ||||
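// For example, with Start = 0, Step = 2 and CRD = 8 this gives
// 0 + 2 * (8 - 1) = 14, i.e. EndValue (16) minus Step (2).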
3750 | for (User *U : OrigPhi->users()) { | ||||
3751 | auto *UI = cast<Instruction>(U); | ||||
3752 | if (!OrigLoop->contains(UI)) { | ||||
3753 | const DataLayout &DL = | ||||
3754 | OrigLoop->getHeader()->getModule()->getDataLayout(); | ||||
3755 | assert(isa<PHINode>(UI) && "Expected LCSSA form"); | ||||
3756 | |||||
3757 | IRBuilder<> B(MiddleBlock->getTerminator()); | ||||
3758 | |||||
3759 | // Fast-math-flags propagate from the original induction instruction. | ||||
3760 | if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) | ||||
3761 | B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); | ||||
3762 | |||||
3763 | Value *CountMinusOne = B.CreateSub( | ||||
3764 | CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); | ||||
3765 | Value *CMO = | ||||
3766 | !II.getStep()->getType()->isIntegerTy() | ||||
3767 | ? B.CreateCast(Instruction::SIToFP, CountMinusOne, | ||||
3768 | II.getStep()->getType()) | ||||
3769 | : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); | ||||
3770 | CMO->setName("cast.cmo"); | ||||
3771 | Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); | ||||
3772 | Escape->setName("ind.escape"); | ||||
3773 | MissingVals[UI] = Escape; | ||||
3774 | } | ||||
3775 | } | ||||
3776 | |||||
3777 | for (auto &I : MissingVals) { | ||||
3778 | PHINode *PHI = cast<PHINode>(I.first); | ||||
3779 | // One corner case we have to handle is two IVs "chasing" each other, | ||||
3780 | // that is %IV2 = phi [...], [ %IV1, %latch ] | ||||
3781 | // In this case, if IV1 has an external use, we need to avoid adding both | ||||
3782 | // "last value of IV1" and "penultimate value of IV2". So, verify that we | ||||
3783 | // don't already have an incoming value for the middle block. | ||||
3784 | if (PHI->getBasicBlockIndex(MiddleBlock) == -1) | ||||
3785 | PHI->addIncoming(I.second, MiddleBlock); | ||||
3786 | } | ||||
3787 | } | ||||
3788 | |||||
3789 | namespace { | ||||
3790 | |||||
3791 | struct CSEDenseMapInfo { | ||||
3792 | static bool canHandle(const Instruction *I) { | ||||
3793 | return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || | ||||
3794 | isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); | ||||
3795 | } | ||||
3796 | |||||
3797 | static inline Instruction *getEmptyKey() { | ||||
3798 | return DenseMapInfo<Instruction *>::getEmptyKey(); | ||||
3799 | } | ||||
3800 | |||||
3801 | static inline Instruction *getTombstoneKey() { | ||||
3802 | return DenseMapInfo<Instruction *>::getTombstoneKey(); | ||||
3803 | } | ||||
3804 | |||||
3805 | static unsigned getHashValue(const Instruction *I) { | ||||
3806 | assert(canHandle(I) && "Unknown instruction!"); | ||||
3807 | return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), | ||||
3808 | I->value_op_end())); | ||||
3809 | } | ||||
3810 | |||||
3811 | static bool isEqual(const Instruction *LHS, const Instruction *RHS) { | ||||
3812 | if (LHS == getEmptyKey() || RHS == getEmptyKey() || | ||||
3813 | LHS == getTombstoneKey() || RHS == getTombstoneKey()) | ||||
3814 | return LHS == RHS; | ||||
3815 | return LHS->isIdenticalTo(RHS); | ||||
3816 | } | ||||
3817 | }; | ||||
3818 | |||||
3819 | } // end anonymous namespace | ||||
3820 | |||||
3821 | /// Perform CSE of induction variable instructions. | ||||
3822 | static void cse(BasicBlock *BB) { | ||||
3823 | // Perform simple cse. | ||||
3824 | SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; | ||||
3825 | for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { | ||||
3826 | Instruction *In = &*I++; | ||||
3827 | |||||
3828 | if (!CSEDenseMapInfo::canHandle(In)) | ||||
3829 | continue; | ||||
3830 | |||||
3831 | // Check if we can replace this instruction with any of the | ||||
3832 | // visited instructions. | ||||
3833 | if (Instruction *V = CSEMap.lookup(In)) { | ||||
3834 | In->replaceAllUsesWith(V); | ||||
3835 | In->eraseFromParent(); | ||||
3836 | continue; | ||||
3837 | } | ||||
3838 | |||||
3839 | CSEMap[In] = In; | ||||
3840 | } | ||||
3841 | } | ||||
3842 | |||||
3843 | InstructionCost | ||||
3844 | LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, | ||||
3845 | bool &NeedToScalarize) const { | ||||
3846 | Function *F = CI->getCalledFunction(); | ||||
3847 | Type *ScalarRetTy = CI->getType(); | ||||
3848 | SmallVector<Type *, 4> Tys, ScalarTys; | ||||
3849 | for (auto &ArgOp : CI->arg_operands()) | ||||
3850 | ScalarTys.push_back(ArgOp->getType()); | ||||
3851 | |||||
3852 | // Estimate cost of scalarized vector call. The source operands are assumed | ||||
3853 | // to be vectors, so we need to extract individual elements from there, | ||||
3854 | // execute VF scalar calls, and then gather the result into the vector return | ||||
3855 | // value. | ||||
3856 | InstructionCost ScalarCallCost = | ||||
3857 | TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); | ||||
3858 | if (VF.isScalar()) | ||||
3859 | return ScalarCallCost; | ||||
3860 | |||||
3861 | // Compute corresponding vector type for return value and arguments. | ||||
3862 | Type *RetTy = ToVectorTy(ScalarRetTy, VF); | ||||
3863 | for (Type *ScalarTy : ScalarTys) | ||||
3864 | Tys.push_back(ToVectorTy(ScalarTy, VF)); | ||||
3865 | |||||
3866 | // Compute costs of unpacking argument values for the scalar calls and | ||||
3867 | // packing the return values to a vector. | ||||
3868 | InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); | ||||
3869 | |||||
3870 | InstructionCost Cost = | ||||
3871 | ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; | ||||
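// For example (illustrative numbers): with VF = 4, a scalar call cost of 10
// and a scalarization overhead of 6, the scalarized estimate is
// 4 * 10 + 6 = 46.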
3872 | |||||
3873 | // If we can't emit a vector call for this function, then the currently found | ||||
3874 | // cost is the cost we need to return. | ||||
3875 | NeedToScalarize = true; | ||||
3876 | VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); | ||||
3877 | Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); | ||||
3878 | |||||
3879 | if (!TLI || CI->isNoBuiltin() || !VecFunc) | ||||
3880 | return Cost; | ||||
3881 | |||||
3882 | // If the corresponding vector cost is cheaper, return its cost. | ||||
3883 | InstructionCost VectorCallCost = | ||||
3884 | TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); | ||||
3885 | if (VectorCallCost < Cost) { | ||||
3886 | NeedToScalarize = false; | ||||
3887 | Cost = VectorCallCost; | ||||
3888 | } | ||||
3889 | return Cost; | ||||
3890 | } | ||||
3891 | |||||
3892 | static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { | ||||
3893 | if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) | ||||
3894 | return Elt; | ||||
3895 | return VectorType::get(Elt, VF); | ||||
3896 | } | ||||
3897 | |||||
3898 | InstructionCost | ||||
3899 | LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, | ||||
3900 | ElementCount VF) const { | ||||
3901 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | ||||
3902 | assert(ID && "Expected intrinsic call!"); | ||||
3903 | Type *RetTy = MaybeVectorizeType(CI->getType(), VF); | ||||
3904 | FastMathFlags FMF; | ||||
3905 | if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) | ||||
3906 | FMF = FPMO->getFastMathFlags(); | ||||
3907 | |||||
3908 | SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); | ||||
3909 | FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); | ||||
3910 | SmallVector<Type *> ParamTys; | ||||
3911 | std::transform(FTy->param_begin(), FTy->param_end(), | ||||
3912 | std::back_inserter(ParamTys), | ||||
3913 | [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); | ||||
3914 | |||||
3915 | IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, | ||||
3916 | dyn_cast<IntrinsicInst>(CI)); | ||||
3917 | return TTI.getIntrinsicInstrCost(CostAttrs, | ||||
3918 | TargetTransformInfo::TCK_RecipThroughput); | ||||
3919 | } | ||||
3920 | |||||
3921 | static Type *smallestIntegerVectorType(Type *T1, Type *T2) { | ||||
3922 | auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); | ||||
3923 | auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); | ||||
3924 | return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; | ||||
3925 | } | ||||
3926 | |||||
3927 | static Type *largestIntegerVectorType(Type *T1, Type *T2) { | ||||
3928 | auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); | ||||
3929 | auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); | ||||
3930 | return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; | ||||
3931 | } | ||||
3932 | |||||
3933 | void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { | ||||
3934 | // For every instruction `I` in MinBWs, truncate the operands, create a | ||||
3935 | // truncated version of `I` and reextend its result. InstCombine runs | ||||
3936 | // later and will remove any ext/trunc pairs. | ||||
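// For illustration (a sketch, assuming VF = 4 and a minimal bitwidth of 8):
//   %add = add <4 x i32> %a, %b
// becomes
//   %a.tr   = trunc <4 x i32> %a to <4 x i8>
//   %b.tr   = trunc <4 x i32> %b to <4 x i8>
//   %add.tr = add <4 x i8> %a.tr, %b.tr
//   %add    = zext <4 x i8> %add.tr to <4 x i32>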
3937 | SmallPtrSet<Value *, 4> Erased; | ||||
3938 | for (const auto &KV : Cost->getMinimalBitwidths()) { | ||||
3939 | // If the value wasn't vectorized, we must maintain the original scalar | ||||
3940 | // type. The absence of the value from State indicates that it | ||||
3941 | // wasn't vectorized. | ||||
3942 | VPValue *Def = State.Plan->getVPValue(KV.first); | ||||
3943 | if (!State.hasAnyVectorValue(Def)) | ||||
3944 | continue; | ||||
3945 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
3946 | Value *I = State.get(Def, Part); | ||||
3947 | if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) | ||||
3948 | continue; | ||||
3949 | Type *OriginalTy = I->getType(); | ||||
3950 | Type *ScalarTruncatedTy = | ||||
3951 | IntegerType::get(OriginalTy->getContext(), KV.second); | ||||
3952 | auto *TruncatedTy = FixedVectorType::get( | ||||
3953 | ScalarTruncatedTy, | ||||
3954 | cast<FixedVectorType>(OriginalTy)->getNumElements()); | ||||
3955 | if (TruncatedTy == OriginalTy) | ||||
3956 | continue; | ||||
3957 | |||||
3958 | IRBuilder<> B(cast<Instruction>(I)); | ||||
3959 | auto ShrinkOperand = [&](Value *V) -> Value * { | ||||
3960 | if (auto *ZI = dyn_cast<ZExtInst>(V)) | ||||
3961 | if (ZI->getSrcTy() == TruncatedTy) | ||||
3962 | return ZI->getOperand(0); | ||||
3963 | return B.CreateZExtOrTrunc(V, TruncatedTy); | ||||
3964 | }; | ||||
3965 | |||||
3966 | // The actual instruction modification depends on the instruction type, | ||||
3967 | // unfortunately. | ||||
3968 | Value *NewI = nullptr; | ||||
3969 | if (auto *BO = dyn_cast<BinaryOperator>(I)) { | ||||
3970 | NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), | ||||
3971 | ShrinkOperand(BO->getOperand(1))); | ||||
3972 | |||||
3973 | // Any wrapping introduced by shrinking this operation shouldn't be | ||||
3974 | // considered undefined behavior. So, we can't unconditionally copy | ||||
3975 | // arithmetic wrapping flags to NewI. | ||||
3976 | cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); | ||||
3977 | } else if (auto *CI = dyn_cast<ICmpInst>(I)) { | ||||
3978 | NewI = | ||||
3979 | B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), | ||||
3980 | ShrinkOperand(CI->getOperand(1))); | ||||
3981 | } else if (auto *SI = dyn_cast<SelectInst>(I)) { | ||||
3982 | NewI = B.CreateSelect(SI->getCondition(), | ||||
3983 | ShrinkOperand(SI->getTrueValue()), | ||||
3984 | ShrinkOperand(SI->getFalseValue())); | ||||
3985 | } else if (auto *CI = dyn_cast<CastInst>(I)) { | ||||
3986 | switch (CI->getOpcode()) { | ||||
3987 | default: | ||||
3988 | llvm_unreachable("Unhandled cast!"); | ||||
3989 | case Instruction::Trunc: | ||||
3990 | NewI = ShrinkOperand(CI->getOperand(0)); | ||||
3991 | break; | ||||
3992 | case Instruction::SExt: | ||||
3993 | NewI = B.CreateSExtOrTrunc( | ||||
3994 | CI->getOperand(0), | ||||
3995 | smallestIntegerVectorType(OriginalTy, TruncatedTy)); | ||||
3996 | break; | ||||
3997 | case Instruction::ZExt: | ||||
3998 | NewI = B.CreateZExtOrTrunc( | ||||
3999 | CI->getOperand(0), | ||||
4000 | smallestIntegerVectorType(OriginalTy, TruncatedTy)); | ||||
4001 | break; | ||||
4002 | } | ||||
4003 | } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { | ||||
4004 | auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) | ||||
4005 | ->getNumElements(); | ||||
4006 | auto *O0 = B.CreateZExtOrTrunc( | ||||
4007 | SI->getOperand(0), | ||||
4008 | FixedVectorType::get(ScalarTruncatedTy, Elements0)); | ||||
4009 | auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) | ||||
4010 | ->getNumElements(); | ||||
4011 | auto *O1 = B.CreateZExtOrTrunc( | ||||
4012 | SI->getOperand(1), | ||||
4013 | FixedVectorType::get(ScalarTruncatedTy, Elements1)); | ||||
4014 | |||||
4015 | NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); | ||||
4016 | } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { | ||||
4017 | // Don't do anything with the operands, just extend the result. | ||||
4018 | continue; | ||||
4019 | } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { | ||||
4020 | auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) | ||||
4021 | ->getNumElements(); | ||||
4022 | auto *O0 = B.CreateZExtOrTrunc( | ||||
4023 | IE->getOperand(0), | ||||
4024 | FixedVectorType::get(ScalarTruncatedTy, Elements)); | ||||
4025 | auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); | ||||
4026 | NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); | ||||
4027 | } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { | ||||
4028 | auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) | ||||
4029 | ->getNumElements(); | ||||
4030 | auto *O0 = B.CreateZExtOrTrunc( | ||||
4031 | EE->getOperand(0), | ||||
4032 | FixedVectorType::get(ScalarTruncatedTy, Elements)); | ||||
4033 | NewI = B.CreateExtractElement(O0, EE->getOperand(2)); | ||||
4034 | } else { | ||||
4035 | // If we don't know what to do, be conservative and don't do anything. | ||||
4036 | continue; | ||||
4037 | } | ||||
4038 | |||||
4039 | // Lastly, extend the result. | ||||
4040 | NewI->takeName(cast<Instruction>(I)); | ||||
4041 | Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); | ||||
4042 | I->replaceAllUsesWith(Res); | ||||
4043 | cast<Instruction>(I)->eraseFromParent(); | ||||
4044 | Erased.insert(I); | ||||
4045 | State.reset(Def, Res, Part); | ||||
4046 | } | ||||
4047 | } | ||||
4048 | |||||
4049 | // We'll have created a bunch of ZExts that are now parentless. Clean up. | ||||
4050 | for (const auto &KV : Cost->getMinimalBitwidths()) { | ||||
4051 | // If the value wasn't vectorized, we must maintain the original scalar | ||||
4052 | // type. The absence of the value from State indicates that it | ||||
4053 | // wasn't vectorized. | ||||
4054 | VPValue *Def = State.Plan->getVPValue(KV.first); | ||||
4055 | if (!State.hasAnyVectorValue(Def)) | ||||
4056 | continue; | ||||
4057 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4058 | Value *I = State.get(Def, Part); | ||||
4059 | ZExtInst *Inst = dyn_cast<ZExtInst>(I); | ||||
4060 | if (Inst && Inst->use_empty()) { | ||||
4061 | Value *NewI = Inst->getOperand(0); | ||||
4062 | Inst->eraseFromParent(); | ||||
4063 | State.reset(Def, NewI, Part); | ||||
4064 | } | ||||
4065 | } | ||||
4066 | } | ||||
4067 | } | ||||
4068 | |||||
4069 | void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { | ||||
4070 | // Insert truncates and extends for any truncated instructions as hints to | ||||
4071 | // InstCombine. | ||||
4072 | if (VF.isVector()) | ||||
4073 | truncateToMinimalBitwidths(State); | ||||
4074 | |||||
4075 | // Fix widened non-induction PHIs by setting up the PHI operands. | ||||
4076 | if (OrigPHIsToFix.size()) { | ||||
4077 | assert(EnableVPlanNativePath && | ||||
4078 | "Unexpected non-induction PHIs for fixup in non VPlan-native path"); | ||||
4079 | fixNonInductionPHIs(State); | ||||
4080 | } | ||||
4081 | |||||
4082 | // At this point every instruction in the original loop is widened to a | ||||
4083 | // vector form. Now we need to fix the recurrences in the loop. These PHI | ||||
4084 | // nodes are currently empty because we did not want to introduce cycles. | ||||
4085 | // This is the second stage of vectorizing recurrences. | ||||
4086 | fixCrossIterationPHIs(State); | ||||
4087 | |||||
4088 | // Forget the original basic block. | ||||
4089 | PSE.getSE()->forgetLoop(OrigLoop); | ||||
4090 | |||||
4091 | // Fix-up external users of the induction variables. | ||||
4092 | for (auto &Entry : Legal->getInductionVars()) | ||||
4093 | fixupIVUsers(Entry.first, Entry.second, | ||||
4094 | getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), | ||||
4095 | IVEndValues[Entry.first], LoopMiddleBlock); | ||||
4096 | |||||
4097 | fixLCSSAPHIs(State); | ||||
4098 | for (Instruction *PI : PredicatedInstructions) | ||||
4099 | sinkScalarOperands(&*PI); | ||||
4100 | |||||
4101 | // Remove redundant induction instructions. | ||||
4102 | cse(LoopVectorBody); | ||||
4103 | |||||
4104 | // Set/update profile weights for the vector and remainder loops as original | ||||
4105 | // loop iterations are now distributed among them. Note that the original loop | ||||
4106 | // represented by LoopScalarBody becomes the remainder loop after vectorization. | ||||
4107 | // | ||||
4108 | // For cases like foldTailByMasking() and requiresScalarEpilogue() we may | ||||
4109 | // end up getting a slightly roughened result but that should be OK since | ||||
4110 | // profile is not inherently precise anyway. Note also possible bypass of | ||||
4111 | // vector code caused by legality checks is ignored, assigning all the weight | ||||
4112 | // to the vector loop, optimistically. | ||||
4113 | // | ||||
4114 | // For scalable vectorization we can't know at compile time how many iterations | ||||
4115 | // of the loop are handled in one vector iteration, so instead assume a pessimistic | ||||
4116 | // vscale of '1'. | ||||
4117 | setProfileInfoAfterUnrolling( | ||||
4118 | LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), | ||||
4119 | LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); | ||||
4120 | } | ||||
4121 | |||||
4122 | void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { | ||||
4123 | // In order to support recurrences we need to be able to vectorize Phi nodes. | ||||
4124 | // Phi nodes have cycles, so we need to vectorize them in two stages. This is | ||||
4125 | // stage #2: We now need to fix the recurrences by adding incoming edges to | ||||
4126 | // the currently empty PHI nodes. At this point every instruction in the | ||||
4127 | // original loop is widened to a vector form so we can use them to construct | ||||
4128 | // the incoming edges. | ||||
4129 | VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); | ||||
4130 | for (VPRecipeBase &R : Header->phis()) { | ||||
4131 | auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R); | ||||
4132 | if (!PhiR) | ||||
4133 | continue; | ||||
4134 | auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); | ||||
4135 | if (PhiR->getRecurrenceDescriptor()) { | ||||
4136 | fixReduction(PhiR, State); | ||||
4137 | } else if (Legal->isFirstOrderRecurrence(OrigPhi)) | ||||
4138 | fixFirstOrderRecurrence(OrigPhi, State); | ||||
4139 | } | ||||
4140 | } | ||||
4141 | |||||
4142 | void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi, | ||||
4143 | VPTransformState &State) { | ||||
4144 | // This is the second phase of vectorizing first-order recurrences. An | ||||
4145 | // overview of the transformation is described below. Suppose we have the | ||||
4146 | // following loop. | ||||
4147 | // | ||||
4148 | // for (int i = 0; i < n; ++i) | ||||
4149 | // b[i] = a[i] - a[i - 1]; | ||||
4150 | // | ||||
4151 | // There is a first-order recurrence on "a". For this loop, the shorthand | ||||
4152 | // scalar IR looks like: | ||||
4153 | // | ||||
4154 | // scalar.ph: | ||||
4155 | // s_init = a[-1] | ||||
4156 | // br scalar.body | ||||
4157 | // | ||||
4158 | // scalar.body: | ||||
4159 | // i = phi [0, scalar.ph], [i+1, scalar.body] | ||||
4160 | // s1 = phi [s_init, scalar.ph], [s2, scalar.body] | ||||
4161 | // s2 = a[i] | ||||
4162 | // b[i] = s2 - s1 | ||||
4163 | // br cond, scalar.body, ... | ||||
4164 | // | ||||
4165 | // In this example, s1 is a recurrence because its value depends on the | ||||
4166 | // previous iteration. In the first phase of vectorization, we created a | ||||
4167 | // temporary value for s1. We now complete the vectorization and produce the | ||||
4168 | // shorthand vector IR shown below (for VF = 4, UF = 1). | ||||
4169 | // | ||||
4170 | // vector.ph: | ||||
4171 | // v_init = vector(..., ..., ..., a[-1]) | ||||
4172 | // br vector.body | ||||
4173 | // | ||||
4174 | // vector.body | ||||
4175 | // i = phi [0, vector.ph], [i+4, vector.body] | ||||
4176 | // v1 = phi [v_init, vector.ph], [v2, vector.body] | ||||
4177 | // v2 = a[i, i+1, i+2, i+3]; | ||||
4178 | // v3 = vector(v1(3), v2(0, 1, 2)) | ||||
4179 | // b[i, i+1, i+2, i+3] = v2 - v3 | ||||
4180 | // br cond, vector.body, middle.block | ||||
4181 | // | ||||
4182 | // middle.block: | ||||
4183 | // x = v2(3) | ||||
4184 | // br scalar.ph | ||||
4185 | // | ||||
4186 | // scalar.ph: | ||||
4187 | // s_init = phi [x, middle.block], [a[-1], otherwise] | ||||
4188 | // br scalar.body | ||||
4189 | // | ||||
4190 | // After the vector loop completes execution, we extract the next value of | ||||
4191 | // the recurrence (x) to use as the initial value in the scalar loop. | ||||
4192 | |||||
4193 | // Get the original loop preheader and single loop latch. | ||||
4194 | auto *Preheader = OrigLoop->getLoopPreheader(); | ||||
4195 | auto *Latch = OrigLoop->getLoopLatch(); | ||||
4196 | |||||
4197 | // Get the initial and previous values of the scalar recurrence. | ||||
4198 | auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); | ||||
4199 | auto *Previous = Phi->getIncomingValueForBlock(Latch); | ||||
4200 | |||||
4201 | auto *IdxTy = Builder.getInt32Ty(); | ||||
4202 | auto *One = ConstantInt::get(IdxTy, 1); | ||||
4203 | |||||
4204 | // Create a vector from the initial value. | ||||
4205 | auto *VectorInit = ScalarInit; | ||||
4206 | if (VF.isVector()) { | ||||
4207 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | ||||
4208 | auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); | ||||
4209 | auto *LastIdx = Builder.CreateSub(RuntimeVF, One); | ||||
4210 | VectorInit = Builder.CreateInsertElement( | ||||
4211 | PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), | ||||
4212 | VectorInit, LastIdx, "vector.recur.init"); | ||||
4213 | } | ||||
4214 | |||||
4215 | VPValue *PhiDef = State.Plan->getVPValue(Phi); | ||||
4216 | VPValue *PreviousDef = State.Plan->getVPValue(Previous); | ||||
4217 | // We constructed a temporary phi node in the first phase of vectorization. | ||||
4218 | // This phi node will eventually be deleted. | ||||
4219 | Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0))); | ||||
4220 | |||||
4221 | // Create a phi node for the new recurrence. The current value will either be | ||||
4222 | // the initial value inserted into a vector or loop-varying vector value. | ||||
4223 | auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); | ||||
4224 | VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); | ||||
4225 | |||||
4226 | // Get the vectorized previous value of the last part UF - 1. It appears last | ||||
4227 | // among all unrolled iterations, due to the order of their construction. | ||||
4228 | Value *PreviousLastPart = State.get(PreviousDef, UF - 1); | ||||
4229 | |||||
4230 | // Find and set the insertion point after the previous value if it is an | ||||
4231 | // instruction. | ||||
4232 | BasicBlock::iterator InsertPt; | ||||
4233 | // Note that the previous value may have been constant-folded so it is not | ||||
4234 | // guaranteed to be an instruction in the vector loop. | ||||
4235 | // FIXME: Loop invariant values do not form recurrences. We should deal with | ||||
4236 | // them earlier. | ||||
4237 | if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) | ||||
4238 | InsertPt = LoopVectorBody->getFirstInsertionPt(); | ||||
4239 | else { | ||||
4240 | Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); | ||||
4241 | if (isa<PHINode>(PreviousLastPart)) | ||||
4242 | // If the previous value is a phi node, we should insert after all the phi | ||||
4243 | // nodes in the block containing the PHI to avoid breaking basic block | ||||
4244 | // verification. Note that the basic block may be different to | ||||
4245 | // LoopVectorBody, in case we predicate the loop. | ||||
4246 | InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); | ||||
4247 | else | ||||
4248 | InsertPt = ++PreviousInst->getIterator(); | ||||
4249 | } | ||||
4250 | Builder.SetInsertPoint(&*InsertPt); | ||||
4251 | |||||
4252 | // The vector from which to take the initial value for the current iteration | ||||
4253 | // (actual or unrolled). Initially, this is the vector phi node. | ||||
4254 | Value *Incoming = VecPhi; | ||||
4255 | |||||
4256 | // Shuffle the current and previous vector and update the vector parts. | ||||
4257 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4258 | Value *PreviousPart = State.get(PreviousDef, Part); | ||||
4259 | Value *PhiPart = State.get(PhiDef, Part); | ||||
4260 | auto *Shuffle = VF.isVector() | ||||
4261 | ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1) | ||||
4262 | : Incoming; | ||||
4263 | PhiPart->replaceAllUsesWith(Shuffle); | ||||
4264 | cast<Instruction>(PhiPart)->eraseFromParent(); | ||||
4265 | State.reset(PhiDef, Shuffle, Part); | ||||
4266 | Incoming = PreviousPart; | ||||
4267 | } | ||||
4268 | |||||
4269 | // Fix the latch value of the new recurrence in the vector loop. | ||||
4270 | VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); | ||||
4271 | |||||
4272 | // Extract the last vector element in the middle block. This will be the | ||||
4273 | // initial value for the recurrence when jumping to the scalar loop. | ||||
4274 | auto *ExtractForScalar = Incoming; | ||||
4275 | if (VF.isVector()) { | ||||
4276 | Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); | ||||
4277 | auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); | ||||
4278 | auto *LastIdx = Builder.CreateSub(RuntimeVF, One); | ||||
4279 | ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, | ||||
4280 | "vector.recur.extract"); | ||||
4281 | } | ||||
4282 | // Extract the second last element in the middle block if the | ||||
4283 | // Phi is used outside the loop. We need to extract the phi itself | ||||
4284 | // and not the last element (the phi update in the current iteration). This | ||||
4285 | // will be the value when jumping to the exit block from the LoopMiddleBlock, | ||||
4286 | // when the scalar loop is not run at all. | ||||
4287 | Value *ExtractForPhiUsedOutsideLoop = nullptr; | ||||
4288 | if (VF.isVector()) { | ||||
4289 | auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); | ||||
4290 | auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); | ||||
4291 | ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( | ||||
4292 | Incoming, Idx, "vector.recur.extract.for.phi"); | ||||
4293 | } else if (UF > 1) | ||||
4294 | // When the loop is unrolled without vectorizing, initialize | ||||
4295 | // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value | ||||
4296 | // of `Incoming`. This is analogous to the vectorized case above: extracting | ||||
4297 | // the second-to-last element when VF > 1. | ||||
4298 | ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); | ||||
4299 | |||||
4300 | // Fix the initial value of the original recurrence in the scalar loop. | ||||
4301 | Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); | ||||
4302 | auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); | ||||
4303 | for (auto *BB : predecessors(LoopScalarPreHeader)) { | ||||
4304 | auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; | ||||
4305 | Start->addIncoming(Incoming, BB); | ||||
4306 | } | ||||
4307 | |||||
4308 | Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); | ||||
4309 | Phi->setName("scalar.recur"); | ||||
4310 | |||||
4311 | // Finally, fix users of the recurrence outside the loop. The users will need | ||||
4312 | // either the last value of the scalar recurrence or the last value of the | ||||
4313 | // vector recurrence we extracted in the middle block. Since the loop is in | ||||
4314 | // LCSSA form, we just need to find all the phi nodes for the original scalar | ||||
4315 | // recurrence in the exit block, and then add an edge for the middle block. | ||||
4316 | // Note that LCSSA does not imply single entry when the original scalar loop | ||||
4317 | // had multiple exiting edges (as we always run the last iteration in the | ||||
4318 | // scalar epilogue); in that case, the exiting path through middle will be | ||||
4319 | // dynamically dead and the value picked for the phi doesn't matter. | ||||
4320 | for (PHINode &LCSSAPhi : LoopExitBlock->phis()) | ||||
4321 | if (any_of(LCSSAPhi.incoming_values(), | ||||
4322 | [Phi](Value *V) { return V == Phi; })) | ||||
4323 | LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); | ||||
4324 | } | ||||
4325 | |||||
4326 | void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR, | ||||
4327 | VPTransformState &State) { | ||||
4328 | PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); | ||||
4329 | // Get its reduction variable descriptor. | ||||
4330 | assert(Legal->isReductionVariable(OrigPhi) && | ||||
4331 | "Unable to find the reduction variable"); | ||||
4332 | const RecurrenceDescriptor &RdxDesc = *PhiR->getRecurrenceDescriptor(); | ||||
4333 | |||||
4334 | RecurKind RK = RdxDesc.getRecurrenceKind(); | ||||
4335 | TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); | ||||
4336 | Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); | ||||
4337 | setDebugLocFromInst(Builder, ReductionStartValue); | ||||
4338 | bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi); | ||||
4339 | |||||
4340 | VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst); | ||||
4341 | // This is the vector-clone of the value that leaves the loop. | ||||
4342 | Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); | ||||
4343 | |||||
4344 | // Wrap flags are in general invalid after vectorization, clear them. | ||||
4345 | clearReductionWrapFlags(RdxDesc, State); | ||||
4346 | |||||
4347 | // Fix the vector-loop phi. | ||||
4348 | |||||
4349 | // Reductions do not have to start at zero. They can start with | ||||
4350 | // any loop invariant values. | ||||
4351 | BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); | ||||
4352 | |||||
4353 | bool IsOrdered = State.VF.isVector() && IsInLoopReductionPhi && | ||||
4354 | Cost->useOrderedReductions(RdxDesc); | ||||
4355 | |||||
4356 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4357 | if (IsOrdered && Part > 0) | ||||
4358 | break; | ||||
4359 | Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part); | ||||
4360 | Value *Val = State.get(PhiR->getBackedgeValue(), Part); | ||||
4361 | if (IsOrdered) | ||||
4362 | Val = State.get(PhiR->getBackedgeValue(), UF - 1); | ||||
4363 | |||||
4364 | cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch); | ||||
4365 | } | ||||
4366 | |||||
4367 | // Before each round, move the insertion point right between | ||||
4368 | // the PHIs and the values we are going to write. | ||||
4369 | // This allows us to write both PHINodes and the extractelement | ||||
4370 | // instructions. | ||||
4371 | Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); | ||||
4372 | |||||
4373 | setDebugLocFromInst(Builder, LoopExitInst); | ||||
4374 | |||||
4375 | Type *PhiTy = OrigPhi->getType(); | ||||
4376 | // If tail is folded by masking, the vector value to leave the loop should be | ||||
4377 | // a Select choosing between the vectorized LoopExitInst and vectorized Phi, | ||||
4378 | // instead of the former. For an inloop reduction the reduction will already | ||||
4379 | // be predicated, and does not need to be handled here. | ||||
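| // E.g. with tail folding and VF=4, the value reaching the middle block is a | ||||
| //   select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi | ||||
| // (names illustrative), so lanes masked off in the final iteration keep the | ||||
| // previous partial results. | ||||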
4380 | if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) { | ||||
4381 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4382 | Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); | ||||
4383 | Value *Sel = nullptr; | ||||
4384 | for (User *U : VecLoopExitInst->users()) { | ||||
4385 | if (isa<SelectInst>(U)) { | ||||
4386 | assert(!Sel && "Reduction exit feeding two selects"); | ||||
4387 | Sel = U; | ||||
4388 | } else | ||||
4389 | assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); | ||||
4390 | } | ||||
4391 | assert(Sel && "Reduction exit feeds no select"); | ||||
4392 | State.reset(LoopExitInstDef, Sel, Part); | ||||
4393 | |||||
4394 | // If the target can create a predicated operator for the reduction at no | ||||
4395 | // extra cost in the loop (for example a predicated vadd), it can be | ||||
4396 | // cheaper for the select to remain in the loop than be sunk out of it, | ||||
4397 | // and so use the select value for the phi instead of the old | ||||
4398 | // LoopExitValue. | ||||
4399 | if (PreferPredicatedReductionSelect || | ||||
4400 | TTI->preferPredicatedReductionSelect( | ||||
4401 | RdxDesc.getOpcode(), PhiTy, | ||||
4402 | TargetTransformInfo::ReductionFlags())) { | ||||
4403 | auto *VecRdxPhi = | ||||
4404 | cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part)); | ||||
4405 | VecRdxPhi->setIncomingValueForBlock( | ||||
4406 | LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); | ||||
4407 | } | ||||
4408 | } | ||||
4409 | } | ||||
4410 | |||||
4411 | // If the vector reduction can be performed in a smaller type, we truncate | ||||
4412 | // then extend the loop exit value to enable InstCombine to evaluate the | ||||
4413 | // entire expression in the smaller type. | ||||
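| // Illustration: an i32 accumulator whose reduction fits in i8 has each | ||||
| // per-part value truncated to <VF x i8> in the vector latch and re-extended, | ||||
| // allowing InstCombine to later shrink the whole reduction chain to i8. | ||||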
4414 | if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { | ||||
4415 | assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); | ||||
4416 | Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); | ||||
4417 | Builder.SetInsertPoint( | ||||
4418 | LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); | ||||
4419 | VectorParts RdxParts(UF); | ||||
4420 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4421 | RdxParts[Part] = State.get(LoopExitInstDef, Part); | ||||
4422 | Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); | ||||
4423 | Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) | ||||
4424 | : Builder.CreateZExt(Trunc, VecTy); | ||||
4425 | for (Value::user_iterator UI = RdxParts[Part]->user_begin(); | ||||
4426 | UI != RdxParts[Part]->user_end();) | ||||
4427 | if (*UI != Trunc) { | ||||
4428 | (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); | ||||
4429 | RdxParts[Part] = Extnd; | ||||
4430 | } else { | ||||
4431 | ++UI; | ||||
4432 | } | ||||
4433 | } | ||||
4434 | Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); | ||||
4435 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4436 | RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); | ||||
4437 | State.reset(LoopExitInstDef, RdxParts[Part], Part); | ||||
4438 | } | ||||
4439 | } | ||||
4440 | |||||
4441 | // Reduce all of the unrolled parts into a single vector. | ||||
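| // E.g. with UF=2 this emits a single 'bin.rdx' combining operation (an | ||||
| // add/mul/etc., or a min/max) in the middle block that folds part 1 into | ||||
| // part 0 before the final horizontal reduction is created below. | ||||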
4442 | Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); | ||||
4443 | unsigned Op = RecurrenceDescriptor::getOpcode(RK); | ||||
4444 | |||||
4445 | // The middle block terminator has already been assigned a DebugLoc here (the | ||||
4446 | // OrigLoop's single latch terminator). We want the whole middle block to | ||||
4447 | // appear to execute on this line because: (a) it is all compiler generated, | ||||
4448 | // (b) these instructions are always executed after evaluating the latch | ||||
4449 | // conditional branch, and (c) other passes may add new predecessors which | ||||
4450 | // terminate on this line. This is the easiest way to ensure we don't | ||||
4451 | // accidentally cause an extra step back into the loop while debugging. | ||||
4452 | setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); | ||||
4453 | if (IsOrdered) | ||||
4454 | ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); | ||||
4455 | else { | ||||
4456 | // Floating-point operations should have some FMF to enable the reduction. | ||||
4457 | IRBuilderBase::FastMathFlagGuard FMFG(Builder); | ||||
4458 | Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); | ||||
4459 | for (unsigned Part = 1; Part < UF; ++Part) { | ||||
4460 | Value *RdxPart = State.get(LoopExitInstDef, Part); | ||||
4461 | if (Op != Instruction::ICmp && Op != Instruction::FCmp) { | ||||
4462 | ReducedPartRdx = Builder.CreateBinOp( | ||||
4463 | (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); | ||||
4464 | } else { | ||||
4465 | ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); | ||||
4466 | } | ||||
4467 | } | ||||
4468 | } | ||||
4469 | |||||
4470 | // Create the reduction after the loop. Note that inloop reductions create the | ||||
4471 | // target reduction in the loop using a Reduction recipe. | ||||
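| // E.g. an integer add reduction at VF=4 becomes something like | ||||
| //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx) | ||||
| // emitted in the middle block. | ||||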
4472 | if (VF.isVector() && !IsInLoopReductionPhi) { | ||||
4473 | ReducedPartRdx = | ||||
4474 | createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); | ||||
4475 | // If the reduction can be performed in a smaller type, we need to extend | ||||
4476 | // the reduction to the wider type before we branch to the original loop. | ||||
4477 | if (PhiTy != RdxDesc.getRecurrenceType()) | ||||
4478 | ReducedPartRdx = RdxDesc.isSigned() | ||||
4479 | ? Builder.CreateSExt(ReducedPartRdx, PhiTy) | ||||
4480 | : Builder.CreateZExt(ReducedPartRdx, PhiTy); | ||||
4481 | } | ||||
4482 | |||||
4483 | // Create a phi node that merges control-flow from the backedge-taken check | ||||
4484 | // block and the middle block. | ||||
4485 | PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", | ||||
4486 | LoopScalarPreHeader->getTerminator()); | ||||
4487 | for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) | ||||
4488 | BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); | ||||
4489 | BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); | ||||
4490 | |||||
4491 | // Now, we need to fix the users of the reduction variable | ||||
4492 | // inside and outside of the scalar remainder loop. | ||||
4493 | |||||
4494 | // We know that the loop is in LCSSA form. We need to update the PHI nodes | ||||
4495 | // in the exit blocks. See comment on analogous loop in | ||||
4496 | // fixFirstOrderRecurrence for a more complete explanation of the logic. | ||||
4497 | for (PHINode &LCSSAPhi : LoopExitBlock->phis()) | ||||
4498 | if (any_of(LCSSAPhi.incoming_values(), | ||||
4499 | [LoopExitInst](Value *V) { return V == LoopExitInst; })) | ||||
4500 | LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); | ||||
4501 | |||||
4502 | // Fix the scalar loop reduction variable with the incoming reduction sum | ||||
4503 | // from the vector body and from the backedge value. | ||||
4504 | int IncomingEdgeBlockIdx = | ||||
4505 | OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); | ||||
4506 | assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); | ||||
4507 | // Pick the other block. | ||||
4508 | int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); | ||||
4509 | OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); | ||||
4510 | OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); | ||||
4511 | } | ||||
4512 | |||||
4513 | void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, | ||||
4514 | VPTransformState &State) { | ||||
4515 | RecurKind RK = RdxDesc.getRecurrenceKind(); | ||||
4516 | if (RK != RecurKind::Add && RK != RecurKind::Mul) | ||||
4517 | return; | ||||
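| // Reassociating the reduction across lanes and unroll parts (and seeding the | ||||
| // vector phi with identity values) can introduce wraps that the original | ||||
| // scalar nsw/nuw operations never exhibited, so the flags are dropped from | ||||
| // every vector clone reachable from the loop-exit instruction below. | ||||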
4518 | |||||
4519 | Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); | ||||
4520 | assert(LoopExitInstr && "null loop exit instruction"); | ||||
4521 | SmallVector<Instruction *, 8> Worklist; | ||||
4522 | SmallPtrSet<Instruction *, 8> Visited; | ||||
4523 | Worklist.push_back(LoopExitInstr); | ||||
4524 | Visited.insert(LoopExitInstr); | ||||
4525 | |||||
4526 | while (!Worklist.empty()) { | ||||
4527 | Instruction *Cur = Worklist.pop_back_val(); | ||||
4528 | if (isa<OverflowingBinaryOperator>(Cur)) | ||||
4529 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4530 | Value *V = State.get(State.Plan->getVPValue(Cur), Part); | ||||
4531 | cast<Instruction>(V)->dropPoisonGeneratingFlags(); | ||||
4532 | } | ||||
4533 | |||||
4534 | for (User *U : Cur->users()) { | ||||
4535 | Instruction *UI = cast<Instruction>(U); | ||||
4536 | if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && | ||||
4537 | Visited.insert(UI).second) | ||||
4538 | Worklist.push_back(UI); | ||||
4539 | } | ||||
4540 | } | ||||
4541 | } | ||||
4542 | |||||
4543 | void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { | ||||
4544 | for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { | ||||
4545 | if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) | ||||
4546 | // Some phis were already hand-updated by the reduction and recurrence | ||||
4547 | // code above; leave them alone. | ||||
4548 | continue; | ||||
4549 | |||||
4550 | auto *IncomingValue = LCSSAPhi.getIncomingValue(0); | ||||
4551 | // Non-instruction incoming values will have only one value. | ||||
4552 | |||||
4553 | VPLane Lane = VPLane::getFirstLane(); | ||||
4554 | if (isa<Instruction>(IncomingValue) && | ||||
4555 | !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), | ||||
4556 | VF)) | ||||
4557 | Lane = VPLane::getLastLaneForVF(VF); | ||||
4558 | |||||
4559 | // Can be a loop invariant incoming value or the last scalar value to be | ||||
4560 | // extracted from the vectorized loop. | ||||
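| // E.g. a non-uniform value defined in the loop and used after it is | ||||
| // materialized here as an extract of the last lane of the last unroll part, | ||||
| // inserted in the middle block. | ||||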
4561 | Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); | ||||
4562 | Value *lastIncomingValue = | ||||
4563 | OrigLoop->isLoopInvariant(IncomingValue) | ||||
4564 | ? IncomingValue | ||||
4565 | : State.get(State.Plan->getVPValue(IncomingValue), | ||||
4566 | VPIteration(UF - 1, Lane)); | ||||
4567 | LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); | ||||
4568 | } | ||||
4569 | } | ||||
4570 | |||||
4571 | void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { | ||||
4572 | // The basic block and loop containing the predicated instruction. | ||||
4573 | auto *PredBB = PredInst->getParent(); | ||||
4574 | auto *VectorLoop = LI->getLoopFor(PredBB); | ||||
4575 | |||||
4576 | // Initialize a worklist with the operands of the predicated instruction. | ||||
4577 | SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); | ||||
4578 | |||||
4579 | // Holds instructions that we need to analyze again. An instruction may be | ||||
4580 | // reanalyzed if we don't yet know if we can sink it or not. | ||||
4581 | SmallVector<Instruction *, 8> InstsToReanalyze; | ||||
4582 | |||||
4583 | // Returns true if a given use occurs in the predicated block. Phi nodes use | ||||
4584 | // their operands in their corresponding predecessor blocks. | ||||
4585 | auto isBlockOfUsePredicated = [&](Use &U) -> bool { | ||||
4586 | auto *I = cast<Instruction>(U.getUser()); | ||||
4587 | BasicBlock *BB = I->getParent(); | ||||
4588 | if (auto *Phi = dyn_cast<PHINode>(I)) | ||||
4589 | BB = Phi->getIncomingBlock( | ||||
4590 | PHINode::getIncomingValueNumForOperand(U.getOperandNo())); | ||||
4591 | return BB == PredBB; | ||||
4592 | }; | ||||
4593 | |||||
4594 | // Iteratively sink the scalarized operands of the predicated instruction | ||||
4595 | // into the block we created for it. When an instruction is sunk, its | ||||
4596 | // operands are then added to the worklist. The algorithm ends once a full | ||||
4597 | // pass over the worklist fails to sink a single instruction. | ||||
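| // Illustration: for a scalarized, predicated divide, the address and index | ||||
| // computations feeding it (gep/add instructions) are pulled into the | ||||
| // predicated block as well, provided all of their users live in that block. | ||||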
4598 | bool Changed; | ||||
4599 | do { | ||||
4600 | // Add the instructions that need to be reanalyzed to the worklist, and | ||||
4601 | // reset the changed indicator. | ||||
4602 | Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); | ||||
4603 | InstsToReanalyze.clear(); | ||||
4604 | Changed = false; | ||||
4605 | |||||
4606 | while (!Worklist.empty()) { | ||||
4607 | auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); | ||||
4608 | |||||
4609 | // We can't sink an instruction if it is a phi node, is not in the loop, | ||||
4610 | // or may have side effects. | ||||
4611 | if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || | ||||
4612 | I->mayHaveSideEffects()) | ||||
4613 | continue; | ||||
4614 | |||||
4615 | // If the instruction is already in PredBB, check if we can sink its | ||||
4616 | // operands. In that case, VPlan's sinkScalarOperands() succeeded in | ||||
4617 | // sinking the scalar instruction I, hence it appears in PredBB; but it | ||||
4618 | // may have failed to sink I's operands (recursively), which we try | ||||
4619 | // (again) here. | ||||
4620 | if (I->getParent() == PredBB) { | ||||
4621 | Worklist.insert(I->op_begin(), I->op_end()); | ||||
4622 | continue; | ||||
4623 | } | ||||
4624 | |||||
4625 | // It's legal to sink the instruction if all its uses occur in the | ||||
4626 | // predicated block. Otherwise, there's nothing to do yet, and we may | ||||
4627 | // need to reanalyze the instruction. | ||||
4628 | if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { | ||||
4629 | InstsToReanalyze.push_back(I); | ||||
4630 | continue; | ||||
4631 | } | ||||
4632 | |||||
4633 | // Move the instruction to the beginning of the predicated block, and add | ||||
4634 | // its operands to the worklist. | ||||
4635 | I->moveBefore(&*PredBB->getFirstInsertionPt()); | ||||
4636 | Worklist.insert(I->op_begin(), I->op_end()); | ||||
4637 | |||||
4638 | // The sinking may have enabled other instructions to be sunk, so we will | ||||
4639 | // need to iterate. | ||||
4640 | Changed = true; | ||||
4641 | } | ||||
4642 | } while (Changed); | ||||
4643 | } | ||||
4644 | |||||
4645 | void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { | ||||
4646 | for (PHINode *OrigPhi : OrigPHIsToFix) { | ||||
4647 | VPWidenPHIRecipe *VPPhi = | ||||
4648 | cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); | ||||
4649 | PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); | ||||
4650 | // Make sure the builder has a valid insert point. | ||||
4651 | Builder.SetInsertPoint(NewPhi); | ||||
4652 | for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { | ||||
4653 | VPValue *Inc = VPPhi->getIncomingValue(i); | ||||
4654 | VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); | ||||
4655 | NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); | ||||
4656 | } | ||||
4657 | } | ||||
4658 | } | ||||
4659 | |||||
4660 | bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { | ||||
4661 | return Cost->useOrderedReductions(RdxDesc); | ||||
4662 | } | ||||
4663 | |||||
4664 | void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, | ||||
4665 | VPUser &Operands, unsigned UF, | ||||
4666 | ElementCount VF, bool IsPtrLoopInvariant, | ||||
4667 | SmallBitVector &IsIndexLoopInvariant, | ||||
4668 | VPTransformState &State) { | ||||
4669 | // Construct a vector GEP by widening the operands of the scalar GEP as | ||||
4670 | // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP | ||||
4671 | // results in a vector of pointers when at least one operand of the GEP | ||||
4672 | // is vector-typed. Thus, to keep the representation compact, we only use | ||||
4673 | // vector-typed operands for loop-varying values. | ||||
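| // E.g. a GEP with a loop-varying index is widened to something like | ||||
| //   getelementptr i32, i32* %base, <VF x i64> %vec.index | ||||
| // yielding a vector of pointers while keeping invariant operands scalar. | ||||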
4674 | |||||
4675 | if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { | ||||
4676 | // If we are vectorizing, but the GEP has only loop-invariant operands, | ||||
4677 | // the GEP we build (by only using vector-typed operands for | ||||
4678 | // loop-varying values) would be a scalar pointer. Thus, to ensure we | ||||
4679 | // produce a vector of pointers, we need to either arbitrarily pick an | ||||
4680 | // operand to broadcast, or broadcast a clone of the original GEP. | ||||
4681 | // Here, we broadcast a clone of the original. | ||||
4682 | // | ||||
4683 | // TODO: If at some point we decide to scalarize instructions having | ||||
4684 | // loop-invariant operands, this special case will no longer be | ||||
4685 | // required. We would add the scalarization decision to | ||||
4686 | // collectLoopScalars() and teach getVectorValue() to broadcast | ||||
4687 | // the lane-zero scalar value. | ||||
4688 | auto *Clone = Builder.Insert(GEP->clone()); | ||||
4689 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4690 | Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); | ||||
4691 | State.set(VPDef, EntryPart, Part); | ||||
4692 | addMetadata(EntryPart, GEP); | ||||
4693 | } | ||||
4694 | } else { | ||||
4695 | // If the GEP has at least one loop-varying operand, we are sure to | ||||
4696 | // produce a vector of pointers. But if we are only unrolling, we want | ||||
4697 | // to produce a scalar GEP for each unroll part. Thus, the GEP we | ||||
4698 | // produce with the code below will be scalar (if VF == 1) or vector | ||||
4699 | // (otherwise). Note that for the unroll-only case, we still maintain | ||||
4700 | // values in the vector mapping with initVector, as we do for other | ||||
4701 | // instructions. | ||||
4702 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4703 | // The pointer operand of the new GEP. If it's loop-invariant, we | ||||
4704 | // won't broadcast it. | ||||
4705 | auto *Ptr = IsPtrLoopInvariant | ||||
4706 | ? State.get(Operands.getOperand(0), VPIteration(0, 0)) | ||||
4707 | : State.get(Operands.getOperand(0), Part); | ||||
4708 | |||||
4709 | // Collect all the indices for the new GEP. If any index is | ||||
4710 | // loop-invariant, we won't broadcast it. | ||||
4711 | SmallVector<Value *, 4> Indices; | ||||
4712 | for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { | ||||
4713 | VPValue *Operand = Operands.getOperand(I); | ||||
4714 | if (IsIndexLoopInvariant[I - 1]) | ||||
4715 | Indices.push_back(State.get(Operand, VPIteration(0, 0))); | ||||
4716 | else | ||||
4717 | Indices.push_back(State.get(Operand, Part)); | ||||
4718 | } | ||||
4719 | |||||
4720 | // Create the new GEP. Note that this GEP may be a scalar if VF == 1, | ||||
4721 | // but it should be a vector, otherwise. | ||||
4722 | auto *NewGEP = | ||||
4723 | GEP->isInBounds() | ||||
4724 | ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, | ||||
4725 | Indices) | ||||
4726 | : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); | ||||
4727 | assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && | ||||
4728 | "NewGEP is not a pointer vector"); | ||||
4729 | State.set(VPDef, NewGEP, Part); | ||||
4730 | addMetadata(NewGEP, GEP); | ||||
4731 | } | ||||
4732 | } | ||||
4733 | } | ||||
4734 | |||||
4735 | void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, | ||||
4736 | RecurrenceDescriptor *RdxDesc, | ||||
4737 | VPWidenPHIRecipe *PhiR, | ||||
4738 | VPTransformState &State) { | ||||
4739 | PHINode *P = cast<PHINode>(PN); | ||||
4740 | if (EnableVPlanNativePath) { | ||||
4741 | // Currently we enter here in the VPlan-native path for non-induction | ||||
4742 | // PHIs where all control flow is uniform. We simply widen these PHIs. | ||||
4743 | // Create a vector phi with no operands - the vector phi operands will be | ||||
4744 | // set at the end of vector code generation. | ||||
4745 | Type *VecTy = (State.VF.isScalar()) | ||||
4746 | ? PN->getType() | ||||
4747 | : VectorType::get(PN->getType(), State.VF); | ||||
4748 | Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); | ||||
4749 | State.set(PhiR, VecPhi, 0); | ||||
4750 | OrigPHIsToFix.push_back(P); | ||||
4751 | |||||
4752 | return; | ||||
4753 | } | ||||
4754 | |||||
4755 | assert(PN->getParent() == OrigLoop->getHeader() && | ||||
4756 | "Non-header phis should have been handled elsewhere"); | ||||
4757 | |||||
4758 | VPValue *StartVPV = PhiR->getStartValue(); | ||||
4759 | Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr; | ||||
4760 | // In order to support recurrences we need to be able to vectorize Phi nodes. | ||||
4761 | // Phi nodes have cycles, so we need to vectorize them in two stages. This is | ||||
4762 | // stage #1: We create a new vector PHI node with no incoming edges. We'll use | ||||
4763 | // this value when we vectorize all of the instructions that use the PHI. | ||||
4764 | if (RdxDesc || Legal->isFirstOrderRecurrence(P)) { | ||||
4765 | Value *Iden = nullptr; | ||||
4766 | bool ScalarPHI = | ||||
4767 | (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN)); | ||||
4768 | Type *VecTy = | ||||
4769 | ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF); | ||||
4770 | |||||
4771 | if (RdxDesc) { | ||||
4772 | assert(Legal->isReductionVariable(P) && StartV && | ||||
4773 | "RdxDesc should only be set for reduction variables; in that case " | ||||
4774 | "a StartV is also required"); | ||||
4775 | RecurKind RK = RdxDesc->getRecurrenceKind(); | ||||
4776 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) { | ||||
4777 | // MinMax reductions have the start value as their identity. | ||||
4778 | if (ScalarPHI) { | ||||
4779 | Iden = StartV; | ||||
4780 | } else { | ||||
4781 | IRBuilderBase::InsertPointGuard IPBuilder(Builder); | ||||
4782 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | ||||
4783 | StartV = Iden = | ||||
4784 | Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident"); | ||||
4785 | } | ||||
4786 | } else { | ||||
4787 | Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( | ||||
4788 | RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags()); | ||||
4789 | Iden = IdenC; | ||||
4790 | |||||
4791 | if (!ScalarPHI) { | ||||
4792 | Iden = ConstantVector::getSplat(State.VF, IdenC); | ||||
4793 | IRBuilderBase::InsertPointGuard IPBuilder(Builder); | ||||
4794 | Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); | ||||
4795 | Constant *Zero = Builder.getInt32(0); | ||||
4796 | StartV = Builder.CreateInsertElement(Iden, StartV, Zero); | ||||
4797 | } | ||||
4798 | } | ||||
4799 | } | ||||
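| // E.g. for an i32 add reduction with start value %s and VF=4, part 0's phi | ||||
| // enters the vector loop as <%s, 0, 0, 0> while the remaining parts start | ||||
| // at the all-zero identity vector. | ||||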
4800 | |||||
4801 | bool IsOrdered = State.VF.isVector() && | ||||
4802 | Cost->isInLoopReduction(cast<PHINode>(PN)) && | ||||
4803 | Cost->useOrderedReductions(*RdxDesc); | ||||
4804 | unsigned LastPartForNewPhi = IsOrdered ? 1 : State.UF; | ||||
4805 | for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { | ||||
4806 | Value *EntryPart = PHINode::Create( | ||||
4807 | VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); | ||||
4808 | State.set(PhiR, EntryPart, Part); | ||||
4809 | if (StartV) { | ||||
4810 | // Make sure to add the reduction start value only to the | ||||
4811 | // first unroll part. | ||||
4812 | Value *StartVal = (Part == 0) ? StartV : Iden; | ||||
4813 | cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); | ||||
4814 | } | ||||
4815 | } | ||||
4816 | return; | ||||
4817 | } | ||||
4818 | |||||
4819 | assert(!Legal->isReductionVariable(P) && | ||||
4820 | "reductions should be handled above"); | ||||
4821 | |||||
4822 | setDebugLocFromInst(Builder, P); | ||||
4823 | |||||
4824 | // This PHINode must be an induction variable. | ||||
4825 | // Make sure that we know about it. | ||||
4826 | assert(Legal->getInductionVars().count(P) && "Not an induction variable"); | ||||
4827 | |||||
4828 | InductionDescriptor II = Legal->getInductionVars().lookup(P); | ||||
4829 | const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); | ||||
4830 | |||||
4831 | // FIXME: The newly created binary instructions should contain nsw/nuw flags, | ||||
4832 | // which can be found from the original scalar operations. | ||||
4833 | switch (II.getKind()) { | ||||
4834 | case InductionDescriptor::IK_NoInduction: | ||||
4835 | llvm_unreachable("Unknown induction")::llvm::llvm_unreachable_internal("Unknown induction", "/build/llvm-toolchain-snapshot-13~++20210626100611+81b2f95971ed/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp" , 4835); | ||||
4836 | case InductionDescriptor::IK_IntInduction: | ||||
4837 | case InductionDescriptor::IK_FpInduction: | ||||
4838 | llvm_unreachable("Integer/fp induction is handled elsewhere.")::llvm::llvm_unreachable_internal("Integer/fp induction is handled elsewhere." , "/build/llvm-toolchain-snapshot-13~++20210626100611+81b2f95971ed/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp" , 4838); | ||||
4839 | case InductionDescriptor::IK_PtrInduction: { | ||||
4840 | // Handle the pointer induction variable case. | ||||
4841 | assert(P->getType()->isPointerTy() && "Unexpected type."); | ||||
4842 | |||||
4843 | if (Cost->isScalarAfterVectorization(P, State.VF)) { | ||||
4844 | // This is the normalized GEP that starts counting at zero. | ||||
4845 | Value *PtrInd = | ||||
4846 | Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); | ||||
4847 | // Determine the number of scalars we need to generate for each unroll | ||||
4848 | // iteration. If the instruction is uniform, we only need to generate the | ||||
4849 | // first lane. Otherwise, we generate all VF values. | ||||
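| // E.g. for a pointer bumped by one element per iteration with VF=4, lane L | ||||
| // of part P gets the address at logical index induction + P*4 + L, each | ||||
| // computed as its own scalar 'next.gep'. | ||||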
4850 | bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); | ||||
4851 | unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); | ||||
4852 | |||||
4853 | bool NeedsVectorIndex = !IsUniform && VF.isScalable(); | ||||
4854 | Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; | ||||
4855 | if (NeedsVectorIndex) { | ||||
4856 | Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); | ||||
4857 | UnitStepVec = Builder.CreateStepVector(VecIVTy); | ||||
4858 | PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); | ||||
4859 | } | ||||
4860 | |||||
4861 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4862 | Value *PartStart = createStepForVF( | ||||
4863 | Builder, ConstantInt::get(PtrInd->getType(), Part), VF); | ||||
4864 | |||||
4865 | if (NeedsVectorIndex) { | ||||
4866 | Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); | ||||
4867 | Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); | ||||
4868 | Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); | ||||
4869 | Value *SclrGep = | ||||
4870 | emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); | ||||
4871 | SclrGep->setName("next.gep"); | ||||
4872 | State.set(PhiR, SclrGep, Part); | ||||
4873 | // We've cached the whole vector, which means we can support the | ||||
4874 | // extraction of any lane. | ||||
4875 | continue; | ||||
4876 | } | ||||
4877 | |||||
4878 | for (unsigned Lane = 0; Lane < Lanes; ++Lane) { | ||||
4879 | Value *Idx = Builder.CreateAdd( | ||||
4880 | PartStart, ConstantInt::get(PtrInd->getType(), Lane)); | ||||
4881 | Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); | ||||
4882 | Value *SclrGep = | ||||
4883 | emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); | ||||
4884 | SclrGep->setName("next.gep"); | ||||
4885 | State.set(PhiR, SclrGep, VPIteration(Part, Lane)); | ||||
4886 | } | ||||
4887 | } | ||||
4888 | return; | ||||
4889 | } | ||||
4890 | assert(isa<SCEVConstant>(II.getStep()) && | ||||
4891 | "Induction step not a SCEV constant!"); | ||||
4892 | Type *PhiType = II.getStep()->getType(); | ||||
4893 | |||||
4894 | // Build a pointer phi | ||||
4895 | Value *ScalarStartValue = II.getStartValue(); | ||||
4896 | Type *ScStValueType = ScalarStartValue->getType(); | ||||
4897 | PHINode *NewPointerPhi = | ||||
4898 | PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); | ||||
4899 | NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); | ||||
4900 | |||||
4901 | // A pointer induction, performed by using a gep | ||||
4902 | BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); | ||||
4903 | Instruction *InductionLoc = LoopLatch->getTerminator(); | ||||
4904 | const SCEV *ScalarStep = II.getStep(); | ||||
4905 | SCEVExpander Exp(*PSE.getSE(), DL, "induction"); | ||||
4906 | Value *ScalarStepValue = | ||||
4907 | Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); | ||||
4908 | Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); | ||||
4909 | Value *NumUnrolledElems = | ||||
4910 | Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); | ||||
4911 | Value *InductionGEP = GetElementPtrInst::Create( | ||||
4912 | ScStValueType->getPointerElementType(), NewPointerPhi, | ||||
4913 | Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", | ||||
4914 | InductionLoc); | ||||
4915 | NewPointerPhi->addIncoming(InductionGEP, LoopLatch); | ||||
4916 | |||||
4917 | // Create UF many actual address geps that use the pointer | ||||
4918 | // phi as base and a vectorized version of the step value | ||||
4919 | // (<step*0, ..., step*N>) as offset. | ||||
4920 | for (unsigned Part = 0; Part < State.UF; ++Part) { | ||||
4921 | Type *VecPhiType = VectorType::get(PhiType, State.VF); | ||||
4922 | Value *StartOffsetScalar = | ||||
4923 | Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); | ||||
4924 | Value *StartOffset = | ||||
4925 | Builder.CreateVectorSplat(State.VF, StartOffsetScalar); | ||||
4926 | // Create a vector of consecutive numbers from zero to VF. | ||||
4927 | StartOffset = | ||||
4928 | Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); | ||||
4929 | |||||
4930 | Value *GEP = Builder.CreateGEP( | ||||
4931 | ScStValueType->getPointerElementType(), NewPointerPhi, | ||||
4932 | Builder.CreateMul( | ||||
4933 | StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), | ||||
4934 | "vector.gep")); | ||||
4935 | State.set(PhiR, GEP, Part); | ||||
4936 | } | ||||
4937 | } | ||||
4938 | } | ||||
4939 | } | ||||
4940 | |||||
4941 | /// A helper function for checking whether an integer division-related | ||||
4942 | /// instruction may divide by zero (in which case it must be predicated if | ||||
4943 | /// executed conditionally in the scalar code). | ||||
4944 | /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). | ||||
4945 | /// Non-zero divisors that are non compile-time constants will not be | ||||
4946 | /// converted into multiplication, so we will still end up scalarizing | ||||
4947 | /// the division, but can do so w/o predication. | ||||
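| /// E.g. 'udiv %x, 7' can never divide by zero and needs no predication, | ||||
| /// whereas 'udiv %x, %n' with a non-constant divisor conservatively returns | ||||
| /// true here. | ||||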
4948 | static bool mayDivideByZero(Instruction &I) { | ||||
4949 | assert((I.getOpcode() == Instruction::UDiv || | ||||
4950 | I.getOpcode() == Instruction::SDiv || | ||||
4951 | I.getOpcode() == Instruction::URem || | ||||
4952 | I.getOpcode() == Instruction::SRem) && | ||||
4953 | "Unexpected instruction"); | ||||
4954 | Value *Divisor = I.getOperand(1); | ||||
4955 | auto *CInt = dyn_cast<ConstantInt>(Divisor); | ||||
4956 | return !CInt || CInt->isZero(); | ||||
4957 | } | ||||
4958 | |||||
4959 | void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, | ||||
4960 | VPUser &User, | ||||
4961 | VPTransformState &State) { | ||||
4962 | switch (I.getOpcode()) { | ||||
4963 | case Instruction::Call: | ||||
4964 | case Instruction::Br: | ||||
4965 | case Instruction::PHI: | ||||
4966 | case Instruction::GetElementPtr: | ||||
4967 | case Instruction::Select: | ||||
4968 | llvm_unreachable("This instruction is handled by a different recipe.")::llvm::llvm_unreachable_internal("This instruction is handled by a different recipe." , "/build/llvm-toolchain-snapshot-13~++20210626100611+81b2f95971ed/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp" , 4968); | ||||
4969 | case Instruction::UDiv: | ||||
4970 | case Instruction::SDiv: | ||||
4971 | case Instruction::SRem: | ||||
4972 | case Instruction::URem: | ||||
4973 | case Instruction::Add: | ||||
4974 | case Instruction::FAdd: | ||||
4975 | case Instruction::Sub: | ||||
4976 | case Instruction::FSub: | ||||
4977 | case Instruction::FNeg: | ||||
4978 | case Instruction::Mul: | ||||
4979 | case Instruction::FMul: | ||||
4980 | case Instruction::FDiv: | ||||
4981 | case Instruction::FRem: | ||||
4982 | case Instruction::Shl: | ||||
4983 | case Instruction::LShr: | ||||
4984 | case Instruction::AShr: | ||||
4985 | case Instruction::And: | ||||
4986 | case Instruction::Or: | ||||
4987 | case Instruction::Xor: { | ||||
4988 | // Just widen unops and binops. | ||||
4989 | setDebugLocFromInst(Builder, &I); | ||||
4990 | |||||
4991 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
4992 | SmallVector<Value *, 2> Ops; | ||||
4993 | for (VPValue *VPOp : User.operands()) | ||||
4994 | Ops.push_back(State.get(VPOp, Part)); | ||||
4995 | |||||
4996 | Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); | ||||
4997 | |||||
4998 | if (auto *VecOp = dyn_cast<Instruction>(V)) | ||||
4999 | VecOp->copyIRFlags(&I); | ||||
5000 | |||||
5001 | // Use this vector value for all users of the original instruction. | ||||
5002 | State.set(Def, V, Part); | ||||
5003 | addMetadata(V, &I); | ||||
5004 | } | ||||
5005 | |||||
5006 | break; | ||||
5007 | } | ||||
5008 | case Instruction::ICmp: | ||||
5009 | case Instruction::FCmp: { | ||||
5010 | // Widen compares. Generate vector compares. | ||||
5011 | bool FCmp = (I.getOpcode() == Instruction::FCmp); | ||||
5012 | auto *Cmp = cast<CmpInst>(&I); | ||||
5013 | setDebugLocFromInst(Builder, Cmp); | ||||
5014 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
5015 | Value *A = State.get(User.getOperand(0), Part); | ||||
5016 | Value *B = State.get(User.getOperand(1), Part); | ||||
5017 | Value *C = nullptr; | ||||
5018 | if (FCmp) { | ||||
5019 | // Propagate fast math flags. | ||||
5020 | IRBuilder<>::FastMathFlagGuard FMFG(Builder); | ||||
5021 | Builder.setFastMathFlags(Cmp->getFastMathFlags()); | ||||
5022 | C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); | ||||
5023 | } else { | ||||
5024 | C = Builder.CreateICmp(Cmp->getPredicate(), A, B); | ||||
5025 | } | ||||
5026 | State.set(Def, C, Part); | ||||
5027 | addMetadata(C, &I); | ||||
5028 | } | ||||
5029 | |||||
5030 | break; | ||||
5031 | } | ||||
5032 | |||||
5033 | case Instruction::ZExt: | ||||
5034 | case Instruction::SExt: | ||||
5035 | case Instruction::FPToUI: | ||||
5036 | case Instruction::FPToSI: | ||||
5037 | case Instruction::FPExt: | ||||
5038 | case Instruction::PtrToInt: | ||||
5039 | case Instruction::IntToPtr: | ||||
5040 | case Instruction::SIToFP: | ||||
5041 | case Instruction::UIToFP: | ||||
5042 | case Instruction::Trunc: | ||||
5043 | case Instruction::FPTrunc: | ||||
5044 | case Instruction::BitCast: { | ||||
5045 | auto *CI = cast<CastInst>(&I); | ||||
5046 | setDebugLocFromInst(Builder, CI); | ||||
5047 | |||||
5048 | /// Vectorize casts. | ||||
5049 | Type *DestTy = | ||||
5050 | (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); | ||||
5051 | |||||
5052 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
5053 | Value *A = State.get(User.getOperand(0), Part); | ||||
5054 | Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); | ||||
5055 | State.set(Def, Cast, Part); | ||||
5056 | addMetadata(Cast, &I); | ||||
5057 | } | ||||
5058 | break; | ||||
5059 | } | ||||
5060 | default: | ||||
5061 | // This instruction is not vectorized by simple widening. | ||||
5062 | LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); | ||||
5063 | llvm_unreachable("Unhandled instruction!"); | ||||
5064 | } // end of switch. | ||||
5065 | } | ||||
5066 | |||||
5067 | void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, | ||||
5068 | VPUser &ArgOperands, | ||||
5069 | VPTransformState &State) { | ||||
5070 | assert(!isa<DbgInfoIntrinsic>(I) && | ||||
5071 | "DbgInfoIntrinsic should have been dropped during VPlan construction"); | ||||
5072 | setDebugLocFromInst(Builder, &I); | ||||
5073 | |||||
5074 | Module *M = I.getParent()->getParent()->getParent(); | ||||
5075 | auto *CI = cast<CallInst>(&I); | ||||
5076 | |||||
5077 | SmallVector<Type *, 4> Tys; | ||||
5078 | for (Value *ArgOperand : CI->arg_operands()) | ||||
5079 | Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); | ||||
5080 | |||||
5081 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | ||||
5082 | |||||
5083 | // Decide whether the vectorized version of this instruction should be an | ||||
5084 | // intrinsic or a plain call: is the intrinsic call cheaper than the | ||||
5085 | // library call? | ||||
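| // E.g. a call to llvm.sqrt.f32 may become llvm.sqrt.v4f32 when the intrinsic | ||||
| // is cheaper, or a call to a vector library routine found via VFDatabase | ||||
| // otherwise. | ||||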
5086 | bool NeedToScalarize = false; | ||||
5087 | InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); | ||||
5088 | InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; | ||||
5089 | bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; | ||||
5090 | assert((UseVectorIntrinsic || !NeedToScalarize) && | ||||
5091 | "Instruction should be scalarized elsewhere."); | ||||
5092 | assert((IntrinsicCost.isValid() || CallCost.isValid()) && | ||||
5093 | "Either the intrinsic cost or vector call cost must be valid"); | ||||
5094 | |||||
5095 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
5096 | SmallVector<Type *, 2> TysForDecl = {CI->getType()}; | ||||
5097 | SmallVector<Value *, 4> Args; | ||||
5098 | for (auto &I : enumerate(ArgOperands.operands())) { | ||||
5099 | // Some intrinsics have a scalar argument - don't replace it with a | ||||
5100 | // vector. | ||||
5101 | Value *Arg; | ||||
5102 | if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) | ||||
5103 | Arg = State.get(I.value(), Part); | ||||
5104 | else { | ||||
5105 | Arg = State.get(I.value(), VPIteration(0, 0)); | ||||
5106 | if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) | ||||
5107 | TysForDecl.push_back(Arg->getType()); | ||||
5108 | } | ||||
5109 | Args.push_back(Arg); | ||||
5110 | } | ||||
5111 | |||||
5112 | Function *VectorF; | ||||
5113 | if (UseVectorIntrinsic) { | ||||
5114 | // Use vector version of the intrinsic. | ||||
5115 | if (VF.isVector()) | ||||
5116 | TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); | ||||
5117 | VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); | ||||
5118 | assert(VectorF && "Can't retrieve vector intrinsic."); | ||||
5119 | } else { | ||||
5120 | // Use vector version of the function call. | ||||
5121 | const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); | ||||
5122 | #ifndef NDEBUG | ||||
5123 | assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && | ||||
5124 | "Can't create vector function."); | ||||
5125 | #endif | ||||
5126 | VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); | ||||
5127 | } | ||||
5128 | SmallVector<OperandBundleDef, 1> OpBundles; | ||||
5129 | CI->getOperandBundlesAsDefs(OpBundles); | ||||
5130 | CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); | ||||
5131 | |||||
5132 | if (isa<FPMathOperator>(V)) | ||||
5133 | V->copyFastMathFlags(CI); | ||||
5134 | |||||
5135 | State.set(Def, V, Part); | ||||
5136 | addMetadata(V, &I); | ||||
5137 | } | ||||
5138 | } | ||||
5139 | |||||
5140 | void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, | ||||
5141 | VPUser &Operands, | ||||
5142 | bool InvariantCond, | ||||
5143 | VPTransformState &State) { | ||||
5144 | setDebugLocFromInst(Builder, &I); | ||||
5145 | |||||
5146 | // The condition can be loop invariant but still defined inside the | ||||
5147 | // loop. This means that we can't just use the original 'cond' value. | ||||
5148 | // We have to take the 'vectorized' value and pick the first lane. | ||||
5149 | // Instcombine will make this a no-op. | ||||
5150 | auto *InvarCond = InvariantCond | ||||
5151 | ? State.get(Operands.getOperand(0), VPIteration(0, 0)) | ||||
5152 | : nullptr; | ||||
5153 | |||||
5154 | for (unsigned Part = 0; Part < UF; ++Part) { | ||||
5155 | Value *Cond = | ||||
5156 | InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); | ||||
5157 | Value *Op0 = State.get(Operands.getOperand(1), Part); | ||||
5158 | Value *Op1 = State.get(Operands.getOperand(2), Part); | ||||
5159 | Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); | ||||
5160 | State.set(VPDef, Sel, Part); | ||||
5161 | addMetadata(Sel, &I); | ||||
5162 | } | ||||
5163 | } | ||||
5164 | |||||
5165 | void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { | ||||
5166 | // We should not collect Scalars more than once per VF. Right now, this | ||||
5167 | // function is called from collectUniformsAndScalars(), which already does | ||||
5168 | // this check. Collecting Scalars for VF=1 does not make any sense. | ||||
5169 | assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && | ||||
5170 | "This function should not be visited twice for the same VF"); | ||||
5171 | |||||
5172 | SmallSetVector<Instruction *, 8> Worklist; | ||||
5173 | |||||
5174 | // These sets are used to seed the analysis with pointers used by memory | ||||
5175 | // accesses that will remain scalar. | ||||
5176 | SmallSetVector<Instruction *, 8> ScalarPtrs; | ||||
5177 | SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; | ||||
5178 | auto *Latch = TheLoop->getLoopLatch(); | ||||
5179 | |||||
5180 | // A helper that returns true if the use of Ptr by MemAccess will be scalar. | ||||
5181 | // The pointer operands of loads and stores will be scalar as long as the | ||||
5182 | // memory access is not a gather or scatter operation. The value operand of a | ||||
5183 | // store will remain scalar if the store is scalarized. | ||||
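| // For example, the pointer operand of a consecutive, widened load or store | ||||
| // is a scalar use (only lane 0 of the address is needed), whereas a | ||||
| // gather/scatter demands one address per lane. | ||||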
5184 | auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { | ||||
5185 | InstWidening WideningDecision = getWideningDecision(MemAccess, VF); | ||||
5186 | assert(WideningDecision != CM_Unknown && | ||||
5187 | "Widening decision should be ready at this moment"); | ||||
5188 | if (auto *Store = dyn_cast<StoreInst>(MemAccess)) | ||||
5189 | if (Ptr == Store->getValueOperand()) | ||||
5190 | return WideningDecision == CM_Scalarize; | ||||
5191 | assert(Ptr == getLoadStorePointerOperand(MemAccess) && | ||||
5192 | "Ptr is neither a value nor a pointer operand"); | ||||
5193 | return WideningDecision != CM_GatherScatter; | ||||
5194 | }; | ||||
5195 | |||||
5196 | // A helper that returns true if the given value is a bitcast or | ||||
5197 | // getelementptr instruction contained in the loop. | ||||
5198 | auto isLoopVaryingBitCastOrGEP = [&](Value *V) { | ||||
5199 | return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || | ||||
5200 | isa<GetElementPtrInst>(V)) && | ||||
5201 | !TheLoop->isLoopInvariant(V); | ||||
5202 | }; | ||||
5203 | |||||
5204 | auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { | ||||
5205 | if (!isa<PHINode>(Ptr) || | ||||
5206 | !Legal->getInductionVars().count(cast<PHINode>(Ptr))) | ||||
5207 | return false; | ||||
5208 | auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; | ||||
5209 | if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) | ||||
5210 | return false; | ||||
5211 | return isScalarUse(MemAccess, Ptr); | ||||
5212 | }; | ||||
5213 | |||||
5214 | // A helper that evaluates a memory access's use of a pointer. If the | ||||
5215 | // pointer is actually the pointer induction of a loop, it is inserted | ||||
5216 | // into Worklist. If the use will be a scalar use, and the | ||||
5217 | // pointer is only used by memory accesses, we place the pointer in | ||||
5218 | // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. | ||||
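| // For example, a pointer-induction phi used only by a scalarized access is | ||||
| // added to Worklist together with its update, while a loop-varying GEP used | ||||
| // by such an access is first recorded in ScalarPtrs (or in | ||||
| // PossibleNonScalarPtrs if the use is not scalar or it has non-memory users). | ||||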
5219 | auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { | ||||
5220 | if (isScalarPtrInduction(MemAccess, Ptr)) { | ||||
5221 | Worklist.insert(cast<Instruction>(Ptr)); | ||||
5222 | Instruction *Update = cast<Instruction>( | ||||
5223 | cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); | ||||
5224 | Worklist.insert(Update); | ||||
5225 | LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr | ||||
5226 | << "\n"); | ||||
5227 | LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update | ||||
5228 | << "\n"); | ||||
5229 | return; | ||||
5230 | } | ||||
5231 | // We only care about bitcast and getelementptr instructions contained in | ||||
5232 | // the loop. | ||||
5233 | if (!isLoopVaryingBitCastOrGEP(Ptr)) | ||||
5234 | return; | ||||
5235 | |||||
5236 | // If the pointer has already been identified as scalar (e.g., if it was | ||||
5237 | // also identified as uniform), there's nothing to do. | ||||
5238 | auto *I = cast<Instruction>(Ptr); | ||||
5239 | if (Worklist.count(I)) | ||||
5240 | return; | ||||
5241 | |||||
5242 | // If the use of the pointer will be a scalar use, and all users of the | ||||
5243 | // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, | ||||
5244 | // place the pointer in PossibleNonScalarPtrs. | ||||
5245 | if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { | ||||
5246 | return isa<LoadInst>(U) || isa<StoreInst>(U); | ||||
5247 | })) | ||||
5248 | ScalarPtrs.insert(I); | ||||
5249 | else | ||||
5250 | PossibleNonScalarPtrs.insert(I); | ||||
5251 | }; | ||||
5252 | |||||
5253 | // We seed the scalars analysis with two classes of instructions: (1) | ||||
5254 | // instructions marked uniform-after-vectorization and (2) bitcast, | ||||
5255 | // getelementptr and (pointer) phi instructions used by memory accesses | ||||
5256 | // requiring a scalar use. | ||||
5257 | // | ||||
5258 | // (1) Add to the worklist all instructions that have been identified as | ||||
5259 | // uniform-after-vectorization. | ||||
5260 | Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); | ||||
5261 | |||||
5262 | // (2) Add to the worklist all bitcast and getelementptr instructions used by | ||||
5263 | // memory accesses requiring a scalar use. The pointer operands of loads and | ||||
5264 | // stores will be scalar as long as the memory access is not a gather or | ||||
5265 | // scatter operation. The value operand of a store will remain scalar if the | ||||
5266 | // store is scalarized. | ||||
5267 | for (auto *BB : TheLoop->blocks()) | ||||
5268 | for (auto &I : *BB) { | ||||
5269 | if (auto *Load = dyn_cast<LoadInst>(&I)) { | ||||
5270 | evaluatePtrUse(Load, Load->getPointerOperand()); | ||||
5271 | } else if (auto *Store = dyn_cast<StoreInst>(&I)) { | ||||
5272 | evaluatePtrUse(Store, Store->getPointerOperand()); | ||||
5273 | evaluatePtrUse(Store, Store->getValueOperand()); | ||||
5274 | } | ||||
5275 | } | ||||
5276 | for (auto *I : ScalarPtrs) | ||||
5277 | if (!PossibleNonScalarPtrs.count(I)) { | ||||
5278 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); | ||||
5279 | Worklist.insert(I); | ||||
5280 | } | ||||
5281 | |||||
5282 | // Insert the forced scalars. | ||||
5283 | // FIXME: Currently widenPHIInstruction() often creates a dead vector | ||||
5284 | // induction variable when the PHI user is scalarized. | ||||
5285 | auto ForcedScalar = ForcedScalars.find(VF); | ||||
5286 | if (ForcedScalar != ForcedScalars.end()) | ||||
5287 | for (auto *I : ForcedScalar->second) | ||||
5288 | Worklist.insert(I); | ||||
5289 | |||||
5290 | // Expand the worklist by looking through any bitcasts and getelementptr | ||||
5291 | // instructions we've already identified as scalar. This is similar to the | ||||
5292 | // expansion step in collectLoopUniforms(); however, here we're only | ||||
5293 | // expanding to include additional bitcasts and getelementptr instructions. | ||||
5294 | unsigned Idx = 0; | ||||
5295 | while (Idx != Worklist.size()) { | ||||
5296 | Instruction *Dst = Worklist[Idx++]; | ||||
5297 | if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) | ||||
5298 | continue; | ||||
5299 | auto *Src = cast<Instruction>(Dst->getOperand(0)); | ||||
5300 | if (llvm::all_of(Src->users(), [&](User *U) -> bool { | ||||
5301 | auto *J = cast<Instruction>(U); | ||||
5302 | return !TheLoop->contains(J) || Worklist.count(J) || | ||||
5303 | ((isa<LoadInst>(J) || isa<StoreInst>(J)) && | ||||
5304 | isScalarUse(J, Src)); | ||||
5305 | })) { | ||||
5306 | Worklist.insert(Src); | ||||
5307 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); | ||||
5308 | } | ||||
5309 | } | ||||
5310 | |||||
5311 | // An induction variable will remain scalar if all users of the induction | ||||
5312 | // variable and induction variable update remain scalar. | ||||
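| // For example, if an induction variable is used only by a GEP that was | ||||
| // already identified as scalar and by its own update, both the induction | ||||
| // and the update stay scalar after vectorization. | ||||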
5313 | for (auto &Induction : Legal->getInductionVars()) { | ||||
5314 | auto *Ind = Induction.first; | ||||
5315 | auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); | ||||
5316 | |||||
5317 | // If tail-folding is applied, the primary induction variable will be used | ||||
5318 | // to feed a vector compare. | ||||
5319 | if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) | ||||
5320 | continue; | ||||
5321 | |||||
5322 | // Determine if all users of the induction variable are scalar after | ||||
5323 | // vectorization. | ||||
5324 | auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { | ||||
5325 | auto *I = cast<Instruction>(U); | ||||
5326 | return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); | ||||
5327 | }); | ||||
5328 | if (!ScalarInd) | ||||
5329 | continue; | ||||
5330 | |||||
5331 | // Determine if all users of the induction variable update instruction are | ||||
5332 | // scalar after vectorization. | ||||
5333 | auto ScalarIndUpdate = | ||||
5334 | llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { | ||||
5335 | auto *I = cast<Instruction>(U); | ||||
5336 | return I == Ind || !TheLoop->contains(I) || Worklist.count(I); | ||||
5337 | }); | ||||
5338 | if (!ScalarIndUpdate) | ||||
5339 | continue; | ||||
5340 | |||||
5341 | // The induction variable and its update instruction will remain scalar. | ||||
5342 | Worklist.insert(Ind); | ||||
5343 | Worklist.insert(IndUpdate); | ||||
5344 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); | ||||
5345 | LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate | ||||
5346 | << "\n"); | ||||
5347 | } | ||||
5348 | |||||
5349 | Scalars[VF].insert(Worklist.begin(), Worklist.end()); | ||||
5350 | } | ||||
5351 | |||||
5352 | bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { | ||||
5353 | if (!blockNeedsPredication(I->getParent())) | ||||
5354 | return false; | ||||
5355 | switch(I->getOpcode()) { | ||||
5356 | default: | ||||
5357 | break; | ||||
5358 | case Instruction::Load: | ||||
5359 | case Instruction::Store: { | ||||
5360 | if (!Legal->isMaskRequired(I)) | ||||
5361 | return false; | ||||
5362 | auto *Ptr = getLoadStorePointerOperand(I); | ||||
5363 | auto *Ty = getLoadStoreType(I); | ||||
5364 | const Align Alignment = getLoadStoreAlignment(I); | ||||
5365 | return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || | ||||
5366 | TTI.isLegalMaskedGather(Ty, Alignment)) | ||||
5367 | : !(isLegalMaskedStore(Ty, Ptr, Alignment) || | ||||
5368 | TTI.isLegalMaskedScatter(Ty, Alignment)); | ||||
5369 | } | ||||
5370 | case Instruction::UDiv: | ||||
5371 | case Instruction::SDiv: | ||||
5372 | case Instruction::SRem: | ||||
5373 | case Instruction::URem: | ||||
5374 | return mayDivideByZero(*I); | ||||
5375 | } | ||||
5376 | return false; | ||||
5377 | } | ||||
5378 | |||||
5379 | bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( | ||||
5380 | Instruction *I, ElementCount VF) { | ||||
5381 | assert(isAccessInterleaved(I) && "Expecting interleaved access."); | ||||
5382 | assert(getWideningDecision(I, VF) == CM_Unknown && | ||||
5383 | "Decision should not be set yet."); | ||||
5384 | auto *Group = getInterleavedAccessGroup(I); | ||||
5385 | assert(Group && "Must have a group."); | ||||
5386 | |||||
5387 | // If the instruction's allocated size doesn't equal its type size, it | ||||
5388 | // requires padding and will be scalarized. | ||||
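| // For example, on x86-64 an x86_fp80 has a type size of 80 bits but an | ||||
| // alloc size of 128 bits, so consecutive elements would not be laid out | ||||
| // contiguously in a vector. | ||||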
5389 | auto &DL = I->getModule()->getDataLayout(); | ||||
5390 | auto *ScalarTy = getLoadStoreType(I); | ||||
5391 | if (hasIrregularType(ScalarTy, DL)) | ||||
5392 | return false; | ||||
5393 | |||||
5394 | // Check if masking is required. | ||||
5395 | // A Group may need masking for one of two reasons: it resides in a block that | ||||
5396 | // needs predication, or it was decided to use masking to deal with gaps. | ||||
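| // For example, a group with gaps would access memory beyond the original | ||||
| // scalar accesses in its last vector iteration, so it needs either a scalar | ||||
| // epilogue or a masked access. | ||||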
5397 | bool PredicatedAccessRequiresMasking = | ||||
5398 | Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); | ||||
5399 | bool AccessWithGapsRequiresMasking = | ||||
5400 | Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); | ||||
5401 | if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) | ||||
5402 | return true; | ||||
5403 | |||||
5404 | // If masked interleaving is required, we expect that the user/target had | ||||
5405 | // enabled it, because otherwise it either wouldn't have been created or | ||||
5406 | // it should have been invalidated by the CostModel. | ||||
5407 | assert(useMaskedInterleavedAccesses(TTI) && | ||||
5408 | "Masked interleave-groups for predicated accesses are not enabled."); | ||||
5409 | |||||
5410 | auto *Ty = getLoadStoreType(I); | ||||
5411 | const Align Alignment = getLoadStoreAlignment(I); | ||||
5412 | return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) | ||||
5413 | : TTI.isLegalMaskedStore(Ty, Alignment); | ||||
5414 | } | ||||
5415 | |||||
5416 | bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( | ||||
5417 | Instruction *I, ElementCount VF) { | ||||
5418 | // Get and ensure we have a valid memory instruction. | ||||
5419 | LoadInst *LI = dyn_cast<LoadInst>(I); | ||||
5420 | StoreInst *SI = dyn_cast<StoreInst>(I); | ||||
5421 | assert((LI || SI) && "Invalid memory instruction"); | ||||
5422 | |||||
5423 | auto *Ptr = getLoadStorePointerOperand(I); | ||||
5424 | |||||
5425 | // In order to be widened, the pointer should be consecutive, first of all. | ||||
5426 | if (!Legal->isConsecutivePtr(Ptr)) | ||||
5427 | return false; | ||||
5428 | |||||
5429 | // If the instruction is a store located in a predicated block, it will be | ||||
5430 | // scalarized. | ||||
5431 | if (isScalarWithPredication(I)) | ||||
5432 | return false; | ||||
5433 | |||||
5434 | // If the instruction's allocated size doesn't equal its type size, it | ||||
5435 | // requires padding and will be scalarized. | ||||
5436 | auto &DL = I->getModule()->getDataLayout(); | ||||
5437 | auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); | ||||
5438 | if (hasIrregularType(ScalarTy, DL)) | ||||
5439 | return false; | ||||
5440 | |||||
5441 | return true; | ||||
5442 | } | ||||
5443 | |||||
5444 | void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { | ||||
5445 | // We should not collect Uniforms more than once per VF. Right now, | ||||
5446 | // this function is called from collectUniformsAndScalars(), which | ||||
5447 | // already does this check. Collecting Uniforms for VF=1 does not make any | ||||
5448 | // sense. | ||||
5449 | |||||
5450 | assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && | ||||
5451 | "This function should not be visited twice for the same VF"); | ||||
5452 | |||||
5453 | // Initialize Uniforms[VF] so that, even if no uniform values are found, we | ||||
5454 | // do not analyze this VF again; Uniforms.count(VF) will return 1. | ||||
5455 | Uniforms[VF].clear(); | ||||
5456 | |||||
5457 | // We now know that the loop is vectorizable! | ||||
5458 | // Collect instructions inside the loop that will remain uniform after | ||||
5459 | // vectorization. | ||||
5460 | |||||
5461 | // Global values, params and instructions outside of current loop are out of | ||||
5462 | // scope. | ||||
5463 | auto isOutOfScope = [&](Value *V) -> bool { | ||||
5464 | Instruction *I = dyn_cast<Instruction>(V); | ||||
5465 | return (!I || !TheLoop->contains(I)); | ||||
5466 | }; | ||||
5467 | |||||
5468 | SetVector<Instruction *> Worklist; | ||||
5469 | BasicBlock *Latch = TheLoop->getLoopLatch(); | ||||
5470 | |||||
5471 | // Instructions that are scalar with predication must not be considered | ||||
5472 | // uniform after vectorization, because that would create an erroneous | ||||
5473 | // replicating region where only a single instance out of VF should be formed. | ||||
5474 | // TODO: optimize such seldom cases if found important, see PR40816. | ||||
5475 | auto addToWorklistIfAllowed = [&](Instruction *I) -> void { | ||||
5476 | if (isOutOfScope(I)) { | ||||
5477 | LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " | ||||
5478 | << *I << "\n"); | ||||
5479 | return; | ||||
5480 | } | ||||
5481 | if (isScalarWithPredication(I)) { | ||||
5482 | LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " | ||||
5483 | << *I << "\n"); | ||||
5484 | return; | ||||
5485 | } | ||||
5486 | LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); | ||||
5487 | Worklist.insert(I); | ||||
5488 | }; | ||||
5489 | |||||
5490 | // Start with the conditional branch. If the branch condition is an | ||||
5491 | // instruction contained in the loop that is only used by the branch, it is | ||||
5492 | // uniform. | ||||
5493 | auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); | ||||
5494 | if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) | ||||
5495 | addToWorklistIfAllowed(Cmp); | ||||
5496 | |||||
5497 | auto isUniformDecision = [&](Instruction *I, ElementCount VF) { | ||||
5498 | InstWidening WideningDecision = getWideningDecision(I, VF); | ||||
5499 | assert(WideningDecision != CM_Unknown && | ||||
5500 | "Widening decision should be ready at this moment"); | ||||
5501 | |||||
5502 | // A uniform memory op is itself uniform. We exclude uniform stores | ||||
5503 | // here as they demand the last lane, not the first one. | ||||
5504 | if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { | ||||
5505 | assert(WideningDecision == CM_Scalarize); | ||||
5506 | return true; | ||||
5507 | } | ||||
5508 | |||||
5509 | return (WideningDecision == CM_Widen || | ||||
5510 | WideningDecision == CM_Widen_Reverse || | ||||
5511 | WideningDecision == CM_Interleave); | ||||
5512 | }; | ||||
5513 | |||||
5514 | |||||
5515 | // Returns true if Ptr is the pointer operand of a memory access instruction | ||||
5516 | // I, and I is known to not require scalarization. | ||||
5517 | auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { | ||||
5518 | return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); | ||||
5519 | }; | ||||
5520 | |||||
5521 | // Holds a list of values which are known to have at least one uniform use. | ||||
5522 | // Note that there may be other uses which aren't uniform. A "uniform use" | ||||
5523 | // here is something which only demands lane 0 of the unrolled iterations; | ||||
5524 | // it does not imply that all lanes produce the same value (e.g. this is not | ||||
5525 | // the usual meaning of uniform) | ||||
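| // For example, the address of a consecutive, widened load has a uniform | ||||
| // use: only lane 0 of the address is demanded, even though the load itself | ||||
| // produces a different value per lane. | ||||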
5526 | SetVector<Value *> HasUniformUse; | ||||
5527 | |||||
5528 | // Scan the loop for instructions which are either a) known to have only | ||||
5529 | // lane 0 demanded or b) are uses which demand only lane 0 of their operand. | ||||
5530 | for (auto *BB : TheLoop->blocks()) | ||||
5531 | for (auto &I : *BB) { | ||||
5532 | // If there's no pointer operand, there's nothing to do. | ||||
5533 | auto *Ptr = getLoadStorePointerOperand(&I); | ||||
5534 | if (!Ptr) | ||||
5535 | continue; | ||||
5536 | |||||
5537 | // A uniform memory op is itself uniform. We exclude uniform stores | ||||
5538 | // here as they demand the last lane, not the first one. | ||||
5539 | if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) | ||||
5540 | addToWorklistIfAllowed(&I); | ||||
5541 | |||||
5542 | if (isUniformDecision(&I, VF)) { | ||||
5543 | assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); | ||||
5544 | HasUniformUse.insert(Ptr); | ||||
5545 | } | ||||
5546 | } | ||||
5547 | |||||
5548 | // Add to the worklist any operands which have *only* uniform (e.g. lane 0 | ||||
5549 | // demanding) users. Since loops are assumed to be in LCSSA form, this | ||||
5550 | // disallows uses outside the loop as well. | ||||
5551 | for (auto *V : HasUniformUse) { | ||||
5552 | if (isOutOfScope(V)) | ||||
5553 | continue; | ||||
5554 | auto *I = cast<Instruction>(V); | ||||
5555 | auto UsersAreMemAccesses = | ||||
5556 | llvm::all_of(I->users(), [&](User *U) -> bool { | ||||
5557 | return isVectorizedMemAccessUse(cast<Instruction>(U), V); | ||||
5558 | }); | ||||
5559 | if (UsersAreMemAccesses) | ||||
5560 | addToWorklistIfAllowed(I); | ||||
5561 | } | ||||
5562 | |||||
5563 | // Expand Worklist in topological order: whenever a new instruction is | ||||
5564 | // added, its users should already be inside Worklist. This ensures that a | ||||
5565 | // uniform instruction will only be used by uniform instructions. | ||||
5566 | unsigned idx = 0; | ||||
5567 | while (idx != Worklist.size()) { | ||||
5568 | Instruction *I = Worklist[idx++]; | ||||
5569 | |||||
5570 | for (auto OV : I->operand_values()) { | ||||
5571 | // isOutOfScope operands cannot be uniform instructions. | ||||
5572 | if (isOutOfScope(OV)) | ||||
5573 | continue; | ||||
5574 | // First order recurrence Phi's should typically be considered | ||||
5575 | // non-uniform. | ||||
5576 | auto *OP = dyn_cast<PHINode>(OV); | ||||
5577 | if (OP && Legal->isFirstOrderRecurrence(OP)) | ||||
5578 | continue; | ||||
5579 | // If all the users of the operand are uniform, then add the | ||||
5580 | // operand into the uniform worklist. | ||||
5581 | auto *OI = cast<Instruction>(OV); | ||||
5582 | if (llvm::all_of(OI->users(), [&](User *U) -> bool { | ||||
5583 | auto *J = cast<Instruction>(U); | ||||
5584 | return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); | ||||
5585 | })) | ||||
5586 | addToWorklistIfAllowed(OI); | ||||
5587 | } | ||||
5588 | } | ||||
5589 | |||||
5590 | // For an instruction to be added into Worklist above, all its users inside | ||||
5591 | // the loop should also be in Worklist. However, this condition cannot be | ||||
5592 | // true for phi nodes that form a cyclic dependence. We must process phi | ||||
5593 | // nodes separately. An induction variable will remain uniform if all users | ||||
5594 | // of the induction variable and induction variable update remain uniform. | ||||
5595 | // The code below handles both pointer and non-pointer induction variables. | ||||
5596 | for (auto &Induction : Legal->getInductionVars()) { | ||||
5597 | auto *Ind = Induction.first; | ||||
5598 | auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); | ||||
5599 | |||||
5600 | // Determine if all users of the induction variable are uniform after | ||||
5601 | // vectorization. | ||||
5602 | auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { | ||||
5603 | auto *I = cast<Instruction>(U); | ||||
5604 | return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || | ||||
5605 | isVectorizedMemAccessUse(I, Ind); | ||||
5606 | }); | ||||
5607 | if (!UniformInd) | ||||
5608 | continue; | ||||
5609 | |||||
5610 | // Determine if all users of the induction variable update instruction are | ||||
5611 | // uniform after vectorization. | ||||
5612 | auto UniformIndUpdate = | ||||
5613 | llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { | ||||
5614 | auto *I = cast<Instruction>(U); | ||||
5615 | return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || | ||||
5616 | isVectorizedMemAccessUse(I, IndUpdate); | ||||
5617 | }); | ||||
5618 | if (!UniformIndUpdate) | ||||
5619 | continue; | ||||
5620 | |||||
5621 | // The induction variable and its update instruction will remain uniform. | ||||
5622 | addToWorklistIfAllowed(Ind); | ||||
5623 | addToWorklistIfAllowed(IndUpdate); | ||||
5624 | } | ||||
5625 | |||||
5626 | Uniforms[VF].insert(Worklist.begin(), Worklist.end()); | ||||
5627 | } | ||||
5628 | |||||
5629 | bool LoopVectorizationCostModel::runtimeChecksRequired() { | ||||
5630 | LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); | ||||
5631 | |||||
5632 | if (Legal->getRuntimePointerChecking()->Need) { | ||||
5633 | reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", | ||||
5634 | "runtime pointer checks needed. Enable vectorization of this " | ||||
5635 | "loop with '#pragma clang loop vectorize(enable)' when " | ||||
5636 | "compiling with -Os/-Oz", | ||||
5637 | "CantVersionLoopWithOptForSize", ORE, TheLoop); | ||||
5638 | return true; | ||||
5639 | } | ||||
5640 | |||||
5641 | if (!PSE.getUnionPredicate().getPredicates().empty()) { | ||||
5642 | reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", | ||||
5643 | "runtime SCEV checks needed. Enable vectorization of this " | ||||
5644 | "loop with '#pragma clang loop vectorize(enable)' when " | ||||
5645 | "compiling with -Os/-Oz", | ||||
5646 | "CantVersionLoopWithOptForSize", ORE, TheLoop); | ||||
5647 | return true; | ||||
5648 | } | ||||
5649 | |||||
5650 | // FIXME: Avoid specializing for stride==1 instead of bailing out. | ||||
5651 | if (!Legal->getLAI()->getSymbolicStrides().empty()) { | ||||
5652 | reportVectorizationFailure("Runtime stride check for small trip count", | ||||
5653 | "runtime stride == 1 checks needed. Enable vectorization of " | ||||
5654 | "this loop without such check by compiling with -Os/-Oz", | ||||
5655 | "CantVersionLoopWithOptForSize", ORE, TheLoop); | ||||
5656 | return true; | ||||
5657 | } | ||||
5658 | |||||
5659 | return false; | ||||
5660 | } | ||||
5661 | |||||
5662 | ElementCount | ||||
5663 | LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { | ||||
5664 | if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { | ||||
5665 | reportVectorizationInfo( | ||||
5666 | "Disabling scalable vectorization, because target does not " | ||||
5667 | "support scalable vectors.", | ||||
5668 | "ScalableVectorsUnsupported", ORE, TheLoop); | ||||
5669 | return ElementCount::getScalable(0); | ||||
5670 | } | ||||
5671 | |||||
5672 | if (Hints->isScalableVectorizationDisabled()) { | ||||
5673 | reportVectorizationInfo("Scalable vectorization is explicitly disabled", | ||||
5674 | "ScalableVectorizationDisabled", ORE, TheLoop); | ||||
5675 | return ElementCount::getScalable(0); | ||||
5676 | } | ||||
5677 | |||||
5678 | auto MaxScalableVF = ElementCount::getScalable( | ||||
5679 | std::numeric_limits<ElementCount::ScalarTy>::max()); | ||||
5680 | |||||
5681 | // Disable scalable vectorization if the loop contains unsupported reductions. | ||||
5682 | // Test that the loop-vectorizer can legalize all operations for this MaxVF. | ||||
5683 | // FIXME: While for scalable vectors this is currently sufficient, this should | ||||
5684 | // be replaced by a more detailed mechanism that filters out specific VFs, | ||||
5685 | // instead of invalidating vectorization for a whole set of VFs based on the | ||||
5686 | // MaxVF. | ||||
5687 | if (!canVectorizeReductions(MaxScalableVF)) { | ||||
5688 | reportVectorizationInfo( | ||||
5689 | "Scalable vectorization not supported for the reduction " | ||||
5690 | "operations found in this loop.", | ||||
5691 | "ScalableVFUnfeasible", ORE, TheLoop); | ||||
5692 | return ElementCount::getScalable(0); | ||||
5693 | } | ||||
5694 | |||||
5695 | if (Legal->isSafeForAnyVectorWidth()) | ||||
5696 | return MaxScalableVF; | ||||
5697 | |||||
5698 | // Limit MaxScalableVF by the maximum safe dependence distance. | ||||
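| // For example, with MaxSafeElements = 32 and a target MaxVScale of 16, the | ||||
| // result below is ElementCount::getScalable(32 / 16), i.e. vscale x 2. | ||||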
5699 | Optional<unsigned> MaxVScale = TTI.getMaxVScale(); | ||||
5700 | MaxScalableVF = ElementCount::getScalable( | ||||
5701 | MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); | ||||
5702 | if (!MaxScalableVF) | ||||
5703 | reportVectorizationInfo( | ||||
5704 | "Max legal vector width too small, scalable vectorization " | ||||
5705 | "unfeasible.", | ||||
5706 | "ScalableVFUnfeasible", ORE, TheLoop); | ||||
5707 | |||||
5708 | return MaxScalableVF; | ||||
5709 | } | ||||
5710 | |||||
5711 | FixedScalableVFPair | ||||
5712 | LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, | ||||
5713 | ElementCount UserVF) { | ||||
5714 | MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); | ||||
5715 | unsigned SmallestType, WidestType; | ||||
5716 | std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); | ||||
5717 | |||||
5718 | // Get the maximum safe dependence distance in bits computed by LAA. | ||||
5719 | // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from | ||||
5720 | // the memory access that is most restrictive (involved in the smallest | ||||
5721 | // dependence distance). | ||||
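| // For example, a maximum safe width of 256 bits with a widest type of 32 | ||||
| // bits gives MaxSafeElements = PowerOf2Floor(256 / 32) = 8. | ||||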
5722 | unsigned MaxSafeElements = | ||||
5723 | PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); | ||||
5724 | |||||
5725 | auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); | ||||
5726 | auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); | ||||
5727 | |||||
5728 | LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF | ||||
5729 | << ".\n"); | ||||
5730 | LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF | ||||
5731 | << ".\n"); | ||||
5732 | |||||
5733 | // First analyze the UserVF, fall back if the UserVF should be ignored. | ||||
5734 | if (UserVF) { | ||||
5735 | auto MaxSafeUserVF = | ||||
5736 | UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; | ||||
5737 | |||||
5738 | if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) | ||||
5739 | return UserVF; | ||||
5740 | |||||
5741 | assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); | ||||
5742 | |||||
5743 | // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it | ||||
5744 | // is better to ignore the hint and let the compiler choose a suitable VF. | ||||
5745 | if (!UserVF.isScalable()) { | ||||
5746 | LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF | ||||
5747 | << " is unsafe, clamping to max safe VF=" | ||||
5748 | << MaxSafeFixedVF << ".\n"); | ||||
5749 | ORE->emit([&]() { | ||||
5750 | return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", | ||||
5751 | TheLoop->getStartLoc(), | ||||
5752 | TheLoop->getHeader()) | ||||
5753 | << "User-specified vectorization factor " | ||||
5754 | << ore::NV("UserVectorizationFactor", UserVF) | ||||
5755 | << " is unsafe, clamping to maximum safe vectorization factor " | ||||
5756 | << ore::NV("VectorizationFactor", MaxSafeFixedVF); | ||||
5757 | }); | ||||
5758 | return MaxSafeFixedVF; | ||||
5759 | } | ||||
5760 | |||||
5761 | LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF | ||||
5762 | << " is unsafe. Ignoring scalable UserVF.\n"); | ||||
5763 | ORE->emit([&]() { | ||||
5764 | return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", | ||||
5765 | TheLoop->getStartLoc(), | ||||
5766 | TheLoop->getHeader()) | ||||
5767 | << "User-specified vectorization factor " | ||||
5768 | << ore::NV("UserVectorizationFactor", UserVF) | ||||
5769 | << " is unsafe. Ignoring the hint to let the compiler pick a " | ||||
5770 | "suitable VF."; | ||||
5771 | }); | ||||
5772 | } | ||||
5773 | |||||
5774 | LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType | ||||
5775 | << " / " << WidestType << " bits.\n"); | ||||
5776 | |||||
5777 | FixedScalableVFPair Result(ElementCount::getFixed(1), | ||||
5778 | ElementCount::getScalable(0)); | ||||
5779 | if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, | ||||
5780 | WidestType, MaxSafeFixedVF)) | ||||
5781 | Result.FixedVF = MaxVF; | ||||
5782 | |||||
5783 | if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, | ||||
5784 | WidestType, MaxSafeScalableVF)) | ||||
5785 | if (MaxVF.isScalable()) { | ||||
5786 | Result.ScalableVF = MaxVF; | ||||
5787 | LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF | ||||
5788 | << "\n"); | ||||
5789 | } | ||||
5790 | |||||
5791 | return Result; | ||||
5792 | } | ||||
5793 | |||||
5794 | FixedScalableVFPair | ||||
5795 | LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { | ||||
5796 | if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { | ||||
5797 | // TODO: It may be useful to do this, since the check is still likely to be | ||||
5798 | // dynamically uniform if the target can skip it. | ||||
5799 | reportVectorizationFailure( | ||||
5800 | "Not inserting runtime ptr check for divergent target", | ||||
5801 | "runtime pointer checks needed. Not enabled for divergent target", | ||||
5802 | "CantVersionLoopWithDivergentTarget", ORE, TheLoop); | ||||
5803 | return FixedScalableVFPair::getNone(); | ||||
5804 | } | ||||
5805 | |||||
5806 | unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); | ||||
5807 | LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); | ||||
5808 | if (TC == 1) { | ||||
5809 | reportVectorizationFailure("Single iteration (non) loop", | ||||
5810 | "loop trip count is one, irrelevant for vectorization", | ||||
5811 | "SingleIterationLoop", ORE, TheLoop); | ||||
5812 | return FixedScalableVFPair::getNone(); | ||||
5813 | } | ||||
5814 | |||||
5815 | switch (ScalarEpilogueStatus) { | ||||
5816 | case CM_ScalarEpilogueAllowed: | ||||
5817 | return computeFeasibleMaxVF(TC, UserVF); | ||||
5818 | case CM_ScalarEpilogueNotAllowedUsePredicate: | ||||
5819 | LLVM_FALLTHROUGH; | ||||
5820 | case CM_ScalarEpilogueNotNeededUsePredicate: | ||||
5821 | LLVM_DEBUG( | ||||
5822 | dbgs() << "LV: vector predicate hint/switch found.\n" | ||||
5823 | << "LV: Not allowing scalar epilogue, creating predicated " | ||||
5824 | << "vector loop.\n"); | ||||
5825 | break; | ||||
5826 | case CM_ScalarEpilogueNotAllowedLowTripLoop: | ||||
5827 | // fallthrough as a special case of OptForSize | ||||
5828 | case CM_ScalarEpilogueNotAllowedOptSize: | ||||
5829 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) | ||||
5830 | LLVM_DEBUG( | ||||
5831 | dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); | ||||
5832 | else | ||||
5833 | LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " | ||||
5834 | << "count.\n"); | ||||
5835 | |||||
5836 | // Bail if runtime checks are required, which are not good when optimising | ||||
5837 | // for size. | ||||
5838 | if (runtimeChecksRequired()) | ||||
5839 | return FixedScalableVFPair::getNone(); | ||||
5840 | |||||
5841 | break; | ||||
5842 | } | ||||
5843 | |||||
5844 | // The only loops we can vectorize without a scalar epilogue are loops with | ||||
5845 | // a bottom-test and a single exiting block. We'd have to handle the fact | ||||
5846 | // that not every instruction executes on the last iteration. This will | ||||
5847 | // require a lane mask which varies through the vector loop body. (TODO) | ||||
5848 | if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { | ||||
5849 | // If there was a tail-folding hint/switch, but we can't fold the tail by | ||||
5850 | // masking, fallback to a vectorization with a scalar epilogue. | ||||
5851 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { | ||||
5852 | LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " | ||||
5853 | "scalar epilogue instead.\n"); | ||||
5854 | ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; | ||||
5855 | return computeFeasibleMaxVF(TC, UserVF); | ||||
5856 | } | ||||
5857 | return FixedScalableVFPair::getNone(); | ||||
5858 | } | ||||
5859 | |||||
5860 | // Now try the tail folding | ||||
5861 | |||||
5862 | // Invalidate interleave groups that require an epilogue if we can't mask | ||||
5863 | // the interleave-group. | ||||
5864 | if (!useMaskedInterleavedAccesses(TTI)) { | ||||
5865 | assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && | ||||
5866 | "No decisions should have been taken at this point"); | ||||
5867 | // Note: There is no need to invalidate any cost modeling decisions here, as | ||||
5868 | // none were taken so far. | ||||
5869 | InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); | ||||
5870 | } | ||||
5871 | |||||
5872 | FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF); | ||||
5873 | // Avoid tail folding if the trip count is known to be a multiple of any VF | ||||
5874 | // we chose. | ||||
5875 | // FIXME: The condition below pessimises the case for fixed-width vectors, | ||||
5876 | // when scalable VFs are also candidates for vectorization. | ||||
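| // For example, with a known trip count of 128, MaxFixedVF = 8 and a user | ||||
| // interleave count of 2, 128 % (8 * 2) == 0, so no tail remains and tail | ||||
| // folding is unnecessary. | ||||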
5877 | if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { | ||||
5878 | ElementCount MaxFixedVF = MaxFactors.FixedVF; | ||||
5879 | assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && | ||||
5880 | "MaxFixedVF must be a power of 2"); | ||||
5881 | unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC | ||||
5882 | : MaxFixedVF.getFixedValue(); | ||||
5883 | ScalarEvolution *SE = PSE.getSE(); | ||||
5884 | const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); | ||||
5885 | const SCEV *ExitCount = SE->getAddExpr( | ||||
5886 | BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); | ||||
5887 | const SCEV *Rem = SE->getURemExpr( | ||||
5888 | SE->applyLoopGuards(ExitCount, TheLoop), | ||||
5889 | SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); | ||||
5890 | if (Rem->isZero()) { | ||||
5891 | // Accept MaxFixedVF if we do not have a tail. | ||||
5892 | LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); | ||||
5893 | return MaxFactors; | ||||
5894 | } | ||||
5895 | } | ||||
5896 | |||||
5897 | // If we don't know the precise trip count, or if the trip count that we | ||||
5898 | // found modulo the vectorization factor is not zero, try to fold the tail | ||||
5899 | // by masking. | ||||
5900 | // FIXME: look for a smaller MaxVF that does divide TC rather than masking. | ||||
5901 | if (Legal->prepareToFoldTailByMasking()) { | ||||
5902 | FoldTailByMasking = true; | ||||
5903 | return MaxFactors; | ||||
5904 | } | ||||
5905 | |||||
5906 | // If there was a tail-folding hint/switch, but we can't fold the tail by | ||||
5907 | // masking, fallback to a vectorization with a scalar epilogue. | ||||
5908 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { | ||||
5909 | LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " | ||||
5910 | "scalar epilogue instead.\n"); | ||||
5911 | ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; | ||||
5912 | return MaxFactors; | ||||
5913 | } | ||||
5914 | |||||
5915 | if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { | ||||
5916 | LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); | ||||
5917 | return FixedScalableVFPair::getNone(); | ||||
5918 | } | ||||
5919 | |||||
5920 | if (TC == 0) { | ||||
5921 | reportVectorizationFailure( | ||||
5922 | "Unable to calculate the loop count due to complex control flow", | ||||
5923 | "unable to calculate the loop count due to complex control flow", | ||||
5924 | "UnknownLoopCountComplexCFG", ORE, TheLoop); | ||||
5925 | return FixedScalableVFPair::getNone(); | ||||
5926 | } | ||||
5927 | |||||
5928 | reportVectorizationFailure( | ||||
5929 | "Cannot optimize for size and vectorize at the same time.", | ||||
5930 | "cannot optimize for size and vectorize at the same time. " | ||||
5931 | "Enable vectorization of this loop with '#pragma clang loop " | ||||
5932 | "vectorize(enable)' when compiling with -Os/-Oz", | ||||
5933 | "NoTailLoopWithOptForSize", ORE, TheLoop); | ||||
5934 | return FixedScalableVFPair::getNone(); | ||||
5935 | } | ||||
5936 | |||||
5937 | ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( | ||||
5938 | unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, | ||||
5939 | const ElementCount &MaxSafeVF) { | ||||
5940 | bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); | ||||
5941 | TypeSize WidestRegister = TTI.getRegisterBitWidth( | ||||
5942 | ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector | ||||
5943 | : TargetTransformInfo::RGK_FixedWidthVector); | ||||
5944 | |||||
5945 | // Convenience function to return the minimum of two ElementCounts. | ||||
5946 | auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { | ||||
5947 | assert((LHS.isScalable() == RHS.isScalable()) && | ||||
5948 | "Scalable flags must match"); | ||||
5949 | return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; | ||||
5950 | }; | ||||
5951 | |||||
5952 | // Ensure MaxVF is a power of 2; the dependence distance bound may not be. | ||||
5953 | // Note that both WidestRegister and WidestType may not be powers of 2. | ||||
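| // For example, a 128-bit widest register with a widest type of 32 bits | ||||
| // gives PowerOf2Floor(128 / 32) = 4 elements. | ||||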
5954 | auto MaxVectorElementCount = ElementCount::get( | ||||
5955 | PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), | ||||
5956 | ComputeScalableMaxVF); | ||||
5957 | MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); | ||||
5958 | LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " | ||||
5959 | << (MaxVectorElementCount * WidestType) << " bits.\n"); | ||||
5960 | |||||
5961 | if (!MaxVectorElementCount) { | ||||
5962 | LLVM_DEBUG(dbgs() << "LV: The target has no "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: The target has no " << (ComputeScalableMaxVF ? "scalable" : "fixed") << " vector registers.\n"; } } while (false) | ||||
5963 | << (ComputeScalableMaxVF ? "scalable" : "fixed")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: The target has no " << (ComputeScalableMaxVF ? "scalable" : "fixed") << " vector registers.\n"; } } while (false) | ||||
5964 | << " vector registers.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: The target has no " << (ComputeScalableMaxVF ? "scalable" : "fixed") << " vector registers.\n"; } } while (false); | ||||
5965 | return ElementCount::getFixed(1); | ||||
5966 | } | ||||
5967 | |||||
5968 | const auto TripCountEC = ElementCount::getFixed(ConstTripCount); | ||||
5969 | if (ConstTripCount && | ||||
5970 | ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && | ||||
5971 | isPowerOf2_32(ConstTripCount)) { | ||||
5972 | // We need to clamp the VF to be the ConstTripCount. There is no point in | ||||
5973 | // choosing a higher viable VF as done in the loop below. If | ||||
5974 | // MaxVectorElementCount is scalable, we only fall back on a fixed VF when | ||||
5975 | // the TC is less than or equal to the known number of lanes. | ||||
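// Illustrative note (hypothetical numbers, not taken from this code): with a
// constant trip count of 8 and a MaxVectorElementCount of 16 lanes, picking
// VF=16 would waste half the lanes every iteration, so the VF is clamped to
// the trip count and VF=8 is returned instead.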
5976 | LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Clamping the MaxVF to the constant trip count: " << ConstTripCount << "\n"; } } while (false) | ||||
5977 | << ConstTripCount << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Clamping the MaxVF to the constant trip count: " << ConstTripCount << "\n"; } } while (false); | ||||
5978 | return TripCountEC; | ||||
5979 | } | ||||
5980 | |||||
5981 | ElementCount MaxVF = MaxVectorElementCount; | ||||
5982 | if (TTI.shouldMaximizeVectorBandwidth() || | ||||
5983 | (MaximizeBandwidth && isScalarEpilogueAllowed())) { | ||||
5984 | auto MaxVectorElementCountMaxBW = ElementCount::get( | ||||
5985 | PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), | ||||
5986 | ComputeScalableMaxVF); | ||||
5987 | MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); | ||||
5988 | |||||
5989 | // Collect all viable vectorization factors larger than the default MaxVF | ||||
5990 | // (i.e. MaxVectorElementCount). | ||||
5991 | SmallVector<ElementCount, 8> VFs; | ||||
5992 | for (ElementCount VS = MaxVectorElementCount * 2; | ||||
5993 | ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) | ||||
5994 | VFs.push_back(VS); | ||||
5995 | |||||
5996 | // For each VF calculate its register usage. | ||||
5997 | auto RUs = calculateRegisterUsage(VFs); | ||||
5998 | |||||
5999 | // Select the largest VF which doesn't require more registers than existing | ||||
6000 | // ones. | ||||
6001 | for (int i = RUs.size() - 1; i >= 0; --i) { | ||||
6002 | bool Selected = true; | ||||
6003 | for (auto &pair : RUs[i].MaxLocalUsers) { | ||||
6004 | unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); | ||||
6005 | if (pair.second > TargetNumRegisters) | ||||
6006 | Selected = false; | ||||
6007 | } | ||||
6008 | if (Selected) { | ||||
6009 | MaxVF = VFs[i]; | ||||
6010 | break; | ||||
6011 | } | ||||
6012 | } | ||||
6013 | if (ElementCount MinVF = | ||||
6014 | TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { | ||||
6015 | if (ElementCount::isKnownLT(MaxVF, MinVF)) { | ||||
6016 | LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVFdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF << ") with target's minimum: " << MinVF << '\n'; } } while (false) | ||||
6017 | << ") with target's minimum: " << MinVF << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF << ") with target's minimum: " << MinVF << '\n'; } } while (false); | ||||
6018 | MaxVF = MinVF; | ||||
6019 | } | ||||
6020 | } | ||||
6021 | } | ||||
6022 | return MaxVF; | ||||
6023 | } | ||||
6024 | |||||
6025 | bool LoopVectorizationCostModel::isMoreProfitable( | ||||
6026 | const VectorizationFactor &A, const VectorizationFactor &B) const { | ||||
6027 | InstructionCost::CostType CostA = *A.Cost.getValue(); | ||||
6028 | InstructionCost::CostType CostB = *B.Cost.getValue(); | ||||
6029 | |||||
6030 | unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); | ||||
6031 | |||||
6032 | if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && | ||||
6033 | MaxTripCount) { | ||||
6034 | // If we are folding the tail and the trip count is a known (possibly small) | ||||
6035 | // constant, the trip count will be rounded up to an integer number of | ||||
6036 | // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), | ||||
6037 | // which we compare directly. When not folding the tail, the total cost will | ||||
6038 | // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is | ||||
6039 | // approximated with the per-lane cost below instead of using the tripcount | ||||
6040 | // as here. | ||||
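// Worked example with made-up numbers: suppose MaxTripCount = 10, VF A = 4
// with per-iteration cost 12, and VF B = 8 with per-iteration cost 20. Then
// RTCostA = 12 * ceil(10/4) = 36 and RTCostB = 20 * ceil(10/8) = 40, so A is
// preferred here even though B looks cheaper per lane (20/8 < 12/4); the
// rounded-up iteration count is what matters when the tail is folded.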
6041 | int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); | ||||
6042 | int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); | ||||
6043 | return RTCostA < RTCostB; | ||||
6044 | } | ||||
6045 | |||||
6046 | // When set to preferred, for now assume vscale may be larger than 1, so | ||||
6047 | // that scalable vectorization is slightly favorable over fixed-width | ||||
6048 | // vectorization. | ||||
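// For instance (made-up costs): a scalable VF of vscale x 4 with cost 8 and a
// fixed VF of 4 with cost 8 compare as 8*4 <= 8*4, so the tie goes to the
// scalable factor under this preference.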
6049 | if (Hints->isScalableVectorizationPreferred()) | ||||
6050 | if (A.Width.isScalable() && !B.Width.isScalable()) | ||||
6051 | return (CostA * B.Width.getKnownMinValue()) <= | ||||
6052 | (CostB * A.Width.getKnownMinValue()); | ||||
6053 | |||||
6054 | // To avoid the need for FP division: | ||||
6055 | // (CostA / A.Width) < (CostB / B.Width) | ||||
6056 | // <=> (CostA * B.Width) < (CostB * A.Width) | ||||
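// Quick check with made-up numbers: CostA = 10 at width 4 (2.5 per lane) vs
// CostB = 6 at width 2 (3 per lane): 10*2 = 20 < 6*4 = 24, so A wins, matching
// the per-lane comparison without needing a division.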
6057 | return (CostA * B.Width.getKnownMinValue()) < | ||||
6058 | (CostB * A.Width.getKnownMinValue()); | ||||
6059 | } | ||||
6060 | |||||
6061 | VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( | ||||
6062 | const ElementCountSet &VFCandidates) { | ||||
6063 | InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; | ||||
6064 | LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"; } } while (false); | ||||
6065 | assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); | ||||
6066 | assert(VFCandidates.count(ElementCount::getFixed(1)) && | ||||
6067 | "Expected Scalar VF to be a candidate"); | ||||
6068 | |||||
6069 | const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); | ||||
6070 | VectorizationFactor ChosenFactor = ScalarCost; | ||||
6071 | |||||
6072 | bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; | ||||
6073 | if (ForceVectorization && VFCandidates.size() > 1) { | ||||
6074 | // Ignore scalar width, because the user explicitly wants vectorization. | ||||
6075 | // Initialize cost to max so that VF = 2 is, at least, chosen during cost | ||||
6076 | // evaluation. | ||||
6077 | ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max(); | ||||
6078 | } | ||||
6079 | |||||
6080 | for (const auto &i : VFCandidates) { | ||||
6081 | // The cost for scalar VF=1 is already calculated, so ignore it. | ||||
6082 | if (i.isScalar()) | ||||
6083 | continue; | ||||
6084 | |||||
6085 | // Notice that the vector loop needs to be executed fewer times, so | ||||
6086 | // we need to divide the cost of the vector loop by the width of | ||||
6087 | // the vector elements. | ||||
6088 | VectorizationCostTy C = expectedCost(i); | ||||
6089 | |||||
6090 | assert(C.first.isValid() && "Unexpected invalid cost for vector loop"); | ||||
6091 | VectorizationFactor Candidate(i, C.first); | ||||
6092 | LLVM_DEBUG( | ||||
6093 | dbgs() << "LV: Vector loop of width " << i << " costs: " | ||||
6094 | << (*Candidate.Cost.getValue() / | ||||
6095 | Candidate.Width.getKnownMinValue()) | ||||
6096 | << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") | ||||
6097 | << ".\n"); | ||||
6098 | |||||
6099 | if (!C.second && !ForceVectorization) { | ||||
6100 | LLVM_DEBUG( | ||||
6101 | dbgs() << "LV: Not considering vector loop of width " << i | ||||
6102 | << " because it will not generate any vector instructions.\n"); | ||||
6103 | continue; | ||||
6104 | } | ||||
6105 | |||||
6106 | // If profitable, add it to the ProfitableVFs list. | ||||
6107 | if (isMoreProfitable(Candidate, ScalarCost)) | ||||
6108 | ProfitableVFs.push_back(Candidate); | ||||
6109 | |||||
6110 | if (isMoreProfitable(Candidate, ChosenFactor)) | ||||
6111 | ChosenFactor = Candidate; | ||||
6112 | } | ||||
6113 | |||||
6114 | if (!EnableCondStoresVectorization && NumPredStores) { | ||||
6115 | reportVectorizationFailure("There are conditional stores.", | ||||
6116 | "store that is conditionally executed prevents vectorization", | ||||
6117 | "ConditionalStore", ORE, TheLoop); | ||||
6118 | ChosenFactor = ScalarCost; | ||||
6119 | } | ||||
6120 | |||||
6121 | LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && | ||||
6122 | *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue()) | ||||
6123 | dbgs() | ||||
6124 | << "LV: Vectorization seems to be not beneficial, " | ||||
6125 | << "but was forced by a user.\n"); | ||||
6126 | LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"; } } while (false); | ||||
6127 | return ChosenFactor; | ||||
6128 | } | ||||
6129 | |||||
6130 | bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( | ||||
6131 | const Loop &L, ElementCount VF) const { | ||||
6132 | // Cross iteration phis such as reductions need special handling and are | ||||
6133 | // currently unsupported. | ||||
6134 | if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { | ||||
6135 | return Legal->isFirstOrderRecurrence(&Phi) || | ||||
6136 | Legal->isReductionVariable(&Phi); | ||||
6137 | })) | ||||
6138 | return false; | ||||
6139 | |||||
6140 | // Phis with uses outside of the loop require special handling and are | ||||
6141 | // currently unsupported. | ||||
6142 | for (auto &Entry : Legal->getInductionVars()) { | ||||
6143 | // Look for uses of the value of the induction at the last iteration. | ||||
6144 | Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); | ||||
6145 | for (User *U : PostInc->users()) | ||||
6146 | if (!L.contains(cast<Instruction>(U))) | ||||
6147 | return false; | ||||
6148 | // Look for uses of penultimate value of the induction. | ||||
6149 | for (User *U : Entry.first->users()) | ||||
6150 | if (!L.contains(cast<Instruction>(U))) | ||||
6151 | return false; | ||||
6152 | } | ||||
6153 | |||||
6154 | // Induction variables that are widened require special handling that is | ||||
6155 | // currently not supported. | ||||
6156 | if (any_of(Legal->getInductionVars(), [&](auto &Entry) { | ||||
6157 | return !(this->isScalarAfterVectorization(Entry.first, VF) || | ||||
6158 | this->isProfitableToScalarize(Entry.first, VF)); | ||||
6159 | })) | ||||
6160 | return false; | ||||
6161 | |||||
6162 | return true; | ||||
6163 | } | ||||
6164 | |||||
6165 | bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( | ||||
6166 | const ElementCount VF) const { | ||||
6167 | // FIXME: We need a much better cost-model to take different parameters such | ||||
6168 | // as register pressure, code size increase and cost of extra branches into | ||||
6169 | // account. For now we apply a very crude heuristic and only consider loops | ||||
6170 | // with vectorization factors larger than a certain value. | ||||
6171 | // We also consider epilogue vectorization unprofitable for targets that don't | ||||
6172 | // consider interleaving beneficial (e.g. MVE). | ||||
6173 | if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) | ||||
6174 | return false; | ||||
6175 | if (VF.getFixedValue() >= EpilogueVectorizationMinVF) | ||||
6176 | return true; | ||||
6177 | return false; | ||||
6178 | } | ||||
6179 | |||||
6180 | VectorizationFactor | ||||
6181 | LoopVectorizationCostModel::selectEpilogueVectorizationFactor( | ||||
6182 | const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { | ||||
6183 | VectorizationFactor Result = VectorizationFactor::Disabled(); | ||||
6184 | if (!EnableEpilogueVectorization) { | ||||
6185 | LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LEV: Epilogue vectorization is disabled.\n" ;; } } while (false); | ||||
6186 | return Result; | ||||
6187 | } | ||||
6188 | |||||
6189 | if (!isScalarEpilogueAllowed()) { | ||||
6190 | LLVM_DEBUG( | ||||
6191 | dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " | ||||
6192 | "allowed.\n";); | ||||
6193 | return Result; | ||||
6194 | } | ||||
6195 | |||||
6196 | // FIXME: This can be fixed for scalable vectors later, because at this stage | ||||
6197 | // the LoopVectorizer will only consider vectorizing a loop with scalable | ||||
6198 | // vectors when the loop has a hint to enable vectorization for a given VF. | ||||
6199 | if (MainLoopVF.isScalable()) { | ||||
6200 | LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LEV: Epilogue vectorization for scalable vectors not " "yet supported.\n"; } } while (false) | ||||
6201 | "yet supported.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LEV: Epilogue vectorization for scalable vectors not " "yet supported.\n"; } } while (false); | ||||
6202 | return Result; | ||||
6203 | } | ||||
6204 | |||||
6205 | // Not really a cost consideration, but check for unsupported cases here to | ||||
6206 | // simplify the logic. | ||||
6207 | if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { | ||||
6208 | LLVM_DEBUG( | ||||
6209 | dbgs() << "LEV: Unable to vectorize epilogue because the loop is " | ||||
6210 | "not a supported candidate.\n";); | ||||
6211 | return Result; | ||||
6212 | } | ||||
6213 | |||||
6214 | if (EpilogueVectorizationForceVF > 1) { | ||||
6215 | LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LEV: Epilogue vectorization factor is forced.\n" ;; } } while (false); | ||||
6216 | if (LVP.hasPlanWithVFs( | ||||
6217 | {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) | ||||
6218 | return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; | ||||
6219 | else { | ||||
6220 | LLVM_DEBUG( | ||||
6221 | dbgs() | ||||
6222 | << "LEV: Epilogue vectorization forced factor is not viable.\n";); | ||||
6223 | return Result; | ||||
6224 | } | ||||
6225 | } | ||||
6226 | |||||
6227 | if (TheLoop->getHeader()->getParent()->hasOptSize() || | ||||
6228 | TheLoop->getHeader()->getParent()->hasMinSize()) { | ||||
6229 | LLVM_DEBUG( | ||||
6230 | dbgs() | ||||
6231 | << "LEV: Epilogue vectorization skipped due to opt for size.\n";); | ||||
6232 | return Result; | ||||
6233 | } | ||||
6234 | |||||
6235 | if (!isEpilogueVectorizationProfitable(MainLoopVF)) | ||||
6236 | return Result; | ||||
6237 | |||||
6238 | for (auto &NextVF : ProfitableVFs) | ||||
6239 | if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && | ||||
6240 | (Result.Width.getFixedValue() == 1 || | ||||
6241 | isMoreProfitable(NextVF, Result)) && | ||||
6242 | LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) | ||||
6243 | Result = NextVF; | ||||
6244 | |||||
6245 | if (Result != VectorizationFactor::Disabled()) | ||||
6246 | LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LEV: Vectorizing epilogue loop with VF = " << Result.Width.getFixedValue() << "\n";; } } while (false) | ||||
6247 | << Result.Width.getFixedValue() << "\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LEV: Vectorizing epilogue loop with VF = " << Result.Width.getFixedValue() << "\n";; } } while (false); | ||||
6248 | return Result; | ||||
6249 | } | ||||
6250 | |||||
6251 | std::pair<unsigned, unsigned> | ||||
6252 | LoopVectorizationCostModel::getSmallestAndWidestTypes() { | ||||
6253 | unsigned MinWidth = -1U; | ||||
6254 | unsigned MaxWidth = 8; | ||||
6255 | const DataLayout &DL = TheFunction->getParent()->getDataLayout(); | ||||
6256 | |||||
6257 | // For each block. | ||||
6258 | for (BasicBlock *BB : TheLoop->blocks()) { | ||||
6259 | // For each instruction in the loop. | ||||
6260 | for (Instruction &I : BB->instructionsWithoutDebug()) { | ||||
6261 | Type *T = I.getType(); | ||||
6262 | |||||
6263 | // Skip ignored values. | ||||
6264 | if (ValuesToIgnore.count(&I)) | ||||
6265 | continue; | ||||
6266 | |||||
6267 | // Only examine Loads, Stores and PHINodes. | ||||
6268 | if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) | ||||
6269 | continue; | ||||
6270 | |||||
6271 | // Examine PHI nodes that are reduction variables. Update the type to | ||||
6272 | // account for the recurrence type. | ||||
6273 | if (auto *PN = dyn_cast<PHINode>(&I)) { | ||||
6274 | if (!Legal->isReductionVariable(PN)) | ||||
6275 | continue; | ||||
6276 | const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; | ||||
6277 | if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || | ||||
6278 | TTI.preferInLoopReduction(RdxDesc.getOpcode(), | ||||
6279 | RdxDesc.getRecurrenceType(), | ||||
6280 | TargetTransformInfo::ReductionFlags())) | ||||
6281 | continue; | ||||
6282 | T = RdxDesc.getRecurrenceType(); | ||||
6283 | } | ||||
6284 | |||||
6285 | // Examine the stored values. | ||||
6286 | if (auto *ST = dyn_cast<StoreInst>(&I)) | ||||
6287 | T = ST->getValueOperand()->getType(); | ||||
6288 | |||||
6289 | // Ignore loaded pointer types and stored pointer types that are not | ||||
6290 | // vectorizable. | ||||
6291 | // | ||||
6292 | // FIXME: The check here attempts to predict whether a load or store will | ||||
6293 | // be vectorized. We only know this for certain after a VF has | ||||
6294 | // been selected. Here, we assume that if an access can be | ||||
6295 | // vectorized, it will be. We should also look at extending this | ||||
6296 | // optimization to non-pointer types. | ||||
6297 | // | ||||
6298 | if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && | ||||
6299 | !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) | ||||
6300 | continue; | ||||
6301 | |||||
6302 | MinWidth = std::min(MinWidth, | ||||
6303 | (unsigned)DL.getTypeSizeInBits(T->getScalarType())); | ||||
6304 | MaxWidth = std::max(MaxWidth, | ||||
6305 | (unsigned)DL.getTypeSizeInBits(T->getScalarType())); | ||||
6306 | } | ||||
6307 | } | ||||
6308 | |||||
6309 | return {MinWidth, MaxWidth}; | ||||
6310 | } | ||||
6311 | |||||
6312 | unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, | ||||
6313 | unsigned LoopCost) { | ||||
6314 | // -- The interleave heuristics -- | ||||
6315 | // We interleave the loop in order to expose ILP and reduce the loop overhead. | ||||
6316 | // There are many micro-architectural considerations that we can't predict | ||||
6317 | // at this level. For example, frontend pressure (on decode or fetch) due to | ||||
6318 | // code size, or the number and capabilities of the execution ports. | ||||
6319 | // | ||||
6320 | // We use the following heuristics to select the interleave count: | ||||
6321 | // 1. If the code has reductions, then we interleave to break the cross | ||||
6322 | // iteration dependency. | ||||
6323 | // 2. If the loop is really small, then we interleave to reduce the loop | ||||
6324 | // overhead. | ||||
6325 | // 3. We don't interleave if we think that we will spill registers to memory | ||||
6326 | // due to the increased register pressure. | ||||
6327 | |||||
6328 | if (!isScalarEpilogueAllowed()) | ||||
6329 | return 1; | ||||
6330 | |||||
6331 | // We used the distance for the interleave count. | ||||
6332 | if (Legal->getMaxSafeDepDistBytes() != -1U) | ||||
6333 | return 1; | ||||
6334 | |||||
6335 | auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); | ||||
6336 | const bool HasReductions = !Legal->getReductionVars().empty(); | ||||
6337 | // Do not interleave loops with a relatively small known or estimated trip | ||||
6338 | // count. But we will interleave when InterleaveSmallLoopScalarReduction is | ||||
6339 | // enabled, and the code has scalar reductions (HasReductions && VF == 1), | ||||
6340 | // because with the above conditions interleaving can expose ILP and break | ||||
6341 | // cross iteration dependences for reductions. | ||||
6342 | if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && | ||||
6343 | !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) | ||||
6344 | return 1; | ||||
6345 | |||||
6346 | RegisterUsage R = calculateRegisterUsage({VF})[0]; | ||||
6347 | // We divide by these constants so assume that we have at least one | ||||
6348 | // instruction that uses at least one register. | ||||
6349 | for (auto& pair : R.MaxLocalUsers) { | ||||
6350 | pair.second = std::max(pair.second, 1U); | ||||
6351 | } | ||||
6352 | |||||
6353 | // We calculate the interleave count using the following formula. | ||||
6354 | // Subtract the number of loop invariants from the number of available | ||||
6355 | // registers. These registers are used by all of the interleaved instances. | ||||
6356 | // Next, divide the remaining registers by the number of registers that is | ||||
6357 | // required by the loop, in order to estimate how many parallel instances | ||||
6358 | // fit without causing spills. All of this is rounded down if necessary to be | ||||
6359 | // a power of two. We want power of two interleave count to simplify any | ||||
6360 | // addressing operations or alignment considerations. | ||||
6361 | // We also want power of two interleave counts to ensure that the induction | ||||
6362 | // variable of the vector loop wraps to zero, when tail is folded by masking; | ||||
6363 | // this currently happens when OptForSize, in which case IC is set to 1 above. | ||||
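// Illustrative arithmetic (hypothetical target numbers): with 32 registers in
// a class, 2 of them tied up by loop invariants and 6 live values per
// instance, the budget allows floor((32 - 2) / 6) = 5 instances, which
// PowerOf2Floor rounds down to an interleave count of 4 for that class.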
6364 | unsigned IC = UINT_MAX; | ||||
6365 | |||||
6366 | for (auto& pair : R.MaxLocalUsers) { | ||||
6367 | unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); | ||||
6368 | LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegistersdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: The target has " << TargetNumRegisters << " registers of " << TTI.getRegisterClassName (pair.first) << " register class\n"; } } while (false) | ||||
6369 | << " registers of "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: The target has " << TargetNumRegisters << " registers of " << TTI.getRegisterClassName (pair.first) << " register class\n"; } } while (false) | ||||
6370 | << TTI.getRegisterClassName(pair.first) << " register class\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: The target has " << TargetNumRegisters << " registers of " << TTI.getRegisterClassName (pair.first) << " register class\n"; } } while (false); | ||||
6371 | if (VF.isScalar()) { | ||||
6372 | if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) | ||||
6373 | TargetNumRegisters = ForceTargetNumScalarRegs; | ||||
6374 | } else { | ||||
6375 | if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) | ||||
6376 | TargetNumRegisters = ForceTargetNumVectorRegs; | ||||
6377 | } | ||||
6378 | unsigned MaxLocalUsers = pair.second; | ||||
6379 | unsigned LoopInvariantRegs = 0; | ||||
6380 | if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) | ||||
6381 | LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; | ||||
6382 | |||||
6383 | unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); | ||||
6384 | // Don't count the induction variable as interleaved. | ||||
6385 | if (EnableIndVarRegisterHeur) { | ||||
6386 | TmpIC = | ||||
6387 | PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / | ||||
6388 | std::max(1U, (MaxLocalUsers - 1))); | ||||
6389 | } | ||||
6390 | |||||
6391 | IC = std::min(IC, TmpIC); | ||||
6392 | } | ||||
6393 | |||||
6394 | // Clamp the interleave ranges to reasonable counts. | ||||
6395 | unsigned MaxInterleaveCount = | ||||
6396 | TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); | ||||
6397 | |||||
6398 | // Check if the user has overridden the max. | ||||
6399 | if (VF.isScalar()) { | ||||
6400 | if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) | ||||
6401 | MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; | ||||
6402 | } else { | ||||
6403 | if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) | ||||
6404 | MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; | ||||
6405 | } | ||||
6406 | |||||
6407 | // If trip count is known or estimated compile time constant, limit the | ||||
6408 | // interleave count to be less than the trip count divided by VF, provided it | ||||
6409 | // is at least 1. | ||||
6410 | // | ||||
6411 | // For scalable vectors we can't know if interleaving is beneficial. It may | ||||
6412 | // not be beneficial for small loops if none of the lanes in the second vector | ||||
6413 | // iteration is enabled. However, for larger loops, there is likely to be a | ||||
6414 | // similar benefit as for fixed-width vectors. For now, we choose to leave | ||||
6415 | // the InterleaveCount as if vscale is '1', although if some information about | ||||
6416 | // the vector is known (e.g. min vector size), we can make a better decision. | ||||
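// For example (hypothetical values): with an estimated trip count of 100, a
// VF with a known-minimum of 8 lanes, and a target max interleave factor of
// 16, the count is limited to min(100 / 8, 16) = 12 so the interleaved body
// still fits within the trip count.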
6417 | if (BestKnownTC) { | ||||
6418 | MaxInterleaveCount = | ||||
6419 | std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); | ||||
6420 | // Make sure MaxInterleaveCount is greater than 0. | ||||
6421 | MaxInterleaveCount = std::max(1u, MaxInterleaveCount); | ||||
6422 | } | ||||
6423 | |||||
6424 | assert(MaxInterleaveCount > 0 && | ||||
6425 | "Maximum interleave count must be greater than 0"); | ||||
6426 | |||||
6427 | // Clamp the calculated IC to be between the 1 and the max interleave count | ||||
6428 | // that the target and trip count allows. | ||||
6429 | if (IC > MaxInterleaveCount) | ||||
6430 | IC = MaxInterleaveCount; | ||||
6431 | else | ||||
6432 | // Make sure IC is greater than 0. | ||||
6433 | IC = std::max(1u, IC); | ||||
6434 | |||||
6435 | assert(IC > 0 && "Interleave count must be greater than 0."); | ||||
6436 | |||||
6437 | // If we did not calculate the cost for VF (because the user selected the VF) | ||||
6438 | // then we calculate the cost of VF here. | ||||
6439 | if (LoopCost == 0) { | ||||
6440 | assert(expectedCost(VF).first.isValid() && "Expected a valid cost"); | ||||
6441 | LoopCost = *expectedCost(VF).first.getValue(); | ||||
6442 | } | ||||
6443 | |||||
6444 | assert(LoopCost && "Non-zero loop cost expected"); | ||||
6445 | |||||
6446 | // Interleave if we vectorized this loop and there is a reduction that could | ||||
6447 | // benefit from interleaving. | ||||
6448 | if (VF.isVector() && HasReductions) { | ||||
6449 | LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Interleaving because of reductions.\n" ; } } while (false); | ||||
6450 | return IC; | ||||
6451 | } | ||||
6452 | |||||
6453 | // Note that if we've already vectorized the loop we will have done the | ||||
6454 | // runtime check and so interleaving won't require further checks. | ||||
6455 | bool InterleavingRequiresRuntimePointerCheck = | ||||
6456 | (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); | ||||
6457 | |||||
6458 | // We want to interleave small loops in order to reduce the loop overhead and | ||||
6459 | // potentially expose ILP opportunities. | ||||
6460 | LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop cost is " << LoopCost << '\n' << "LV: IC is " << IC << '\n' << "LV: VF is " << VF << '\n'; } } while (false) | ||||
6461 | << "LV: IC is " << IC << '\n'do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop cost is " << LoopCost << '\n' << "LV: IC is " << IC << '\n' << "LV: VF is " << VF << '\n'; } } while (false) | ||||
6462 | << "LV: VF is " << VF << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop cost is " << LoopCost << '\n' << "LV: IC is " << IC << '\n' << "LV: VF is " << VF << '\n'; } } while (false); | ||||
6463 | const bool AggressivelyInterleaveReductions = | ||||
6464 | TTI.enableAggressiveInterleaving(HasReductions); | ||||
6465 | if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { | ||||
6466 | // We assume that the cost overhead is 1 and we use the cost model | ||||
6467 | // to estimate the cost of the loop and interleave until the cost of the | ||||
6468 | // loop overhead is about 5% of the cost of the loop. | ||||
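// Sketch with assumed numbers (SmallLoopCost is a cl::opt; suppose it is 20
// here): a loop body costing 5 gives SmallIC = min(IC, PowerOf2Floor(20 / 5))
// = min(IC, 4), keeping the added loop overhead near the targeted ~5%.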
6469 | unsigned SmallIC = | ||||
6470 | std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); | ||||
6471 | |||||
6472 | // Interleave until store/load ports (estimated by max interleave count) are | ||||
6473 | // saturated. | ||||
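// E.g. (made-up counts): with IC = 8, 2 stores and 4 loads per iteration,
// StoresIC = 8/2 = 4 and LoadsIC = 8/4 = 2, so interleaving by
// max(StoresIC, LoadsIC) = 4 is enough to keep the store ports busy.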
6474 | unsigned NumStores = Legal->getNumStores(); | ||||
6475 | unsigned NumLoads = Legal->getNumLoads(); | ||||
6476 | unsigned StoresIC = IC / (NumStores ? NumStores : 1); | ||||
6477 | unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); | ||||
6478 | |||||
6479 | // If we have a scalar reduction (vector reductions are already dealt with | ||||
6480 | // by this point), we can increase the critical path length if the loop | ||||
6481 | // we're interleaving is inside another loop. Limit, by default to 2, so the | ||||
6482 | // critical path only gets increased by one reduction operation. | ||||
6483 | if (HasReductions && TheLoop->getLoopDepth() > 1) { | ||||
6484 | unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); | ||||
6485 | SmallIC = std::min(SmallIC, F); | ||||
6486 | StoresIC = std::min(StoresIC, F); | ||||
6487 | LoadsIC = std::min(LoadsIC, F); | ||||
6488 | } | ||||
6489 | |||||
6490 | if (EnableLoadStoreRuntimeInterleave && | ||||
6491 | std::max(StoresIC, LoadsIC) > SmallIC) { | ||||
6492 | LLVM_DEBUG( | ||||
6493 | dbgs() << "LV: Interleaving to saturate store or load ports.\n"); | ||||
6494 | return std::max(StoresIC, LoadsIC); | ||||
6495 | } | ||||
6496 | |||||
6497 | // If there are scalar reductions and TTI has enabled aggressive | ||||
6498 | // interleaving for reductions, we will interleave to expose ILP. | ||||
6499 | if (InterleaveSmallLoopScalarReduction && VF.isScalar() && | ||||
6500 | AggressivelyInterleaveReductions) { | ||||
6501 | LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Interleaving to expose ILP.\n" ; } } while (false); | ||||
6502 | // Interleave no less than SmallIC but not as aggressively as the normal IC | ||||
6503 | // to satisfy the rare situation when resources are too limited. | ||||
6504 | return std::max(IC / 2, SmallIC); | ||||
6505 | } else { | ||||
6506 | LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Interleaving to reduce branch cost.\n" ; } } while (false); | ||||
6507 | return SmallIC; | ||||
6508 | } | ||||
6509 | } | ||||
6510 | |||||
6511 | // Interleave if this is a large loop (small loops are already dealt with by | ||||
6512 | // this point) that could benefit from interleaving. | ||||
6513 | if (AggressivelyInterleaveReductions) { | ||||
6514 | LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Interleaving to expose ILP.\n" ; } } while (false); | ||||
6515 | return IC; | ||||
6516 | } | ||||
6517 | |||||
6518 | LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Not Interleaving.\n" ; } } while (false); | ||||
6519 | return 1; | ||||
6520 | } | ||||
6521 | |||||
6522 | SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> | ||||
6523 | LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { | ||||
6524 | // This function calculates the register usage by measuring the highest number | ||||
6525 | // of values that are alive at a single location. Obviously, this is a very | ||||
6526 | // rough estimation. We scan the loop in topological order and | ||||
6527 | // assign a number to each instruction. We use RPO to ensure that defs are | ||||
6528 | // met before their users. We assume that each instruction that has in-loop | ||||
6529 | // users starts an interval. We record every time that an in-loop value is | ||||
6530 | // used, so we have a list of the first and last occurrences of each | ||||
6531 | // instruction. Next, we transpose this data structure into a multi map that | ||||
6532 | // holds the list of intervals that *end* at a specific location. This multi | ||||
6533 | // map allows us to perform a linear search. We scan the instructions linearly | ||||
6534 | // and record each time that a new interval starts, by placing it in a set. | ||||
6535 | // If we find this value in the multi-map then we remove it from the set. | ||||
6536 | // The max register usage is the maximum size of the set. | ||||
6537 | // We also search for instructions that are defined outside the loop, but are | ||||
6538 | // used inside the loop. We need this number separately from the max-interval | ||||
6539 | // usage number because when we unroll, loop-invariant values do not take | ||||
6540 | // more registers. | ||||
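// Tiny illustration (hypothetical IR, not from this pass): for a = load,
// b = add a, c = mul b, store c, the live intervals are roughly a:[0,1],
// b:[1,2], c:[2,3]; at most two values overlap at any point, so the estimated
// usage for that register class is 2, while a loop-invariant operand of the
// add would be counted separately in LoopInvariants.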
6541 | LoopBlocksDFS DFS(TheLoop); | ||||
6542 | DFS.perform(LI); | ||||
6543 | |||||
6544 | RegisterUsage RU; | ||||
6545 | |||||
6546 | // Each 'key' in the map opens a new interval. The values | ||||
6547 | // of the map are the index of the 'last seen' usage of the | ||||
6548 | // instruction that is the key. | ||||
6549 | using IntervalMap = DenseMap<Instruction *, unsigned>; | ||||
6550 | |||||
6551 | // Maps instruction to its index. | ||||
6552 | SmallVector<Instruction *, 64> IdxToInstr; | ||||
6553 | // Marks the end of each interval. | ||||
6554 | IntervalMap EndPoint; | ||||
6555 | // Saves the list of instruction indices that are used in the loop. | ||||
6556 | SmallPtrSet<Instruction *, 8> Ends; | ||||
6557 | // Saves the list of values that are used in the loop but are | ||||
6558 | // defined outside the loop, such as arguments and constants. | ||||
6559 | SmallPtrSet<Value *, 8> LoopInvariants; | ||||
6560 | |||||
6561 | for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { | ||||
6562 | for (Instruction &I : BB->instructionsWithoutDebug()) { | ||||
6563 | IdxToInstr.push_back(&I); | ||||
6564 | |||||
6565 | // Save the end location of each USE. | ||||
6566 | for (Value *U : I.operands()) { | ||||
6567 | auto *Instr = dyn_cast<Instruction>(U); | ||||
6568 | |||||
6569 | // Ignore non-instruction values such as arguments, constants, etc. | ||||
6570 | if (!Instr) | ||||
6571 | continue; | ||||
6572 | |||||
6573 | // If this instruction is outside the loop then record it and continue. | ||||
6574 | if (!TheLoop->contains(Instr)) { | ||||
6575 | LoopInvariants.insert(Instr); | ||||
6576 | continue; | ||||
6577 | } | ||||
6578 | |||||
6579 | // Overwrite previous end points. | ||||
6580 | EndPoint[Instr] = IdxToInstr.size(); | ||||
6581 | Ends.insert(Instr); | ||||
6582 | } | ||||
6583 | } | ||||
6584 | } | ||||
6585 | |||||
6586 | // Saves the list of intervals that end with the index in 'key'. | ||||
6587 | using InstrList = SmallVector<Instruction *, 2>; | ||||
6588 | DenseMap<unsigned, InstrList> TransposeEnds; | ||||
6589 | |||||
6590 | // Transpose the EndPoints to a list of values that end at each index. | ||||
6591 | for (auto &Interval : EndPoint) | ||||
6592 | TransposeEnds[Interval.second].push_back(Interval.first); | ||||
6593 | |||||
6594 | SmallPtrSet<Instruction *, 8> OpenIntervals; | ||||
6595 | SmallVector<RegisterUsage, 8> RUs(VFs.size()); | ||||
6596 | SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); | ||||
6597 | |||||
6598 | LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV(REG): Calculating max register usage:\n" ; } } while (false); | ||||
6599 | |||||
6600 | // A lambda that gets the register usage for the given type and VF. | ||||
6601 | const auto &TTICapture = TTI; | ||||
6602 | auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) { | ||||
6603 | if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) | ||||
6604 | return 0; | ||||
6605 | return *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); | ||||
6606 | }; | ||||
6607 | |||||
6608 | for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { | ||||
6609 | Instruction *I = IdxToInstr[i]; | ||||
6610 | |||||
6611 | // Remove all of the instructions that end at this location. | ||||
6612 | InstrList &List = TransposeEnds[i]; | ||||
6613 | for (Instruction *ToRemove : List) | ||||
6614 | OpenIntervals.erase(ToRemove); | ||||
6615 | |||||
6616 | // Ignore instructions that are never used within the loop. | ||||
6617 | if (!Ends.count(I)) | ||||
6618 | continue; | ||||
6619 | |||||
6620 | // Skip ignored values. | ||||
6621 | if (ValuesToIgnore.count(I)) | ||||
6622 | continue; | ||||
6623 | |||||
6624 | // For each VF find the maximum usage of registers. | ||||
6625 | for (unsigned j = 0, e = VFs.size(); j < e; ++j) { | ||||
6626 | // Count the number of live intervals. | ||||
6627 | SmallMapVector<unsigned, unsigned, 4> RegUsage; | ||||
6628 | |||||
6629 | if (VFs[j].isScalar()) { | ||||
6630 | for (auto Inst : OpenIntervals) { | ||||
6631 | unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); | ||||
6632 | if (RegUsage.find(ClassID) == RegUsage.end()) | ||||
6633 | RegUsage[ClassID] = 1; | ||||
6634 | else | ||||
6635 | RegUsage[ClassID] += 1; | ||||
6636 | } | ||||
6637 | } else { | ||||
6638 | collectUniformsAndScalars(VFs[j]); | ||||
6639 | for (auto Inst : OpenIntervals) { | ||||
6640 | // Skip ignored values for VF > 1. | ||||
6641 | if (VecValuesToIgnore.count(Inst)) | ||||
6642 | continue; | ||||
6643 | if (isScalarAfterVectorization(Inst, VFs[j])) { | ||||
6644 | unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); | ||||
6645 | if (RegUsage.find(ClassID) == RegUsage.end()) | ||||
6646 | RegUsage[ClassID] = 1; | ||||
6647 | else | ||||
6648 | RegUsage[ClassID] += 1; | ||||
6649 | } else { | ||||
6650 | unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); | ||||
6651 | if (RegUsage.find(ClassID) == RegUsage.end()) | ||||
6652 | RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); | ||||
6653 | else | ||||
6654 | RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); | ||||
6655 | } | ||||
6656 | } | ||||
6657 | } | ||||
6658 | |||||
6659 | for (auto& pair : RegUsage) { | ||||
6660 | if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) | ||||
6661 | MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); | ||||
6662 | else | ||||
6663 | MaxUsages[j][pair.first] = pair.second; | ||||
6664 | } | ||||
6665 | } | ||||
6666 | |||||
6667 | LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV(REG): At #" << i << " Interval # " << OpenIntervals.size() << '\n'; } } while (false) | ||||
6668 | << OpenIntervals.size() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV(REG): At #" << i << " Interval # " << OpenIntervals.size() << '\n'; } } while (false); | ||||
6669 | |||||
6670 | // Add the current instruction to the list of open intervals. | ||||
6671 | OpenIntervals.insert(I); | ||||
6672 | } | ||||
6673 | |||||
6674 | for (unsigned i = 0, e = VFs.size(); i < e; ++i) { | ||||
6675 | SmallMapVector<unsigned, unsigned, 4> Invariant; | ||||
6676 | |||||
6677 | for (auto Inst : LoopInvariants) { | ||||
6678 | unsigned Usage = | ||||
6679 | VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); | ||||
6680 | unsigned ClassID = | ||||
6681 | TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); | ||||
6682 | if (Invariant.find(ClassID) == Invariant.end()) | ||||
6683 | Invariant[ClassID] = Usage; | ||||
6684 | else | ||||
6685 | Invariant[ClassID] += Usage; | ||||
6686 | } | ||||
6687 | |||||
6688 | LLVM_DEBUG({ | ||||
6689 | dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; | ||||
6690 | dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() | ||||
6691 | << " item\n"; | ||||
6692 | for (const auto &pair : MaxUsages[i]) { | ||||
6693 | dbgs() << "LV(REG): RegisterClass: " | ||||
6694 | << TTI.getRegisterClassName(pair.first) << ", " << pair.second | ||||
6695 | << " registers\n"; | ||||
6696 | } | ||||
6697 | dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() | ||||
6698 | << " item\n"; | ||||
6699 | for (const auto &pair : Invariant) { | ||||
6700 | dbgs() << "LV(REG): RegisterClass: " | ||||
6701 | << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6702 | << " registers\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { { dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() << " item\n"; for (const auto &pair : MaxUsages[i]) { dbgs() << "LV(REG): RegisterClass: " << TTI.getRegisterClassName(pair.first) << ", " << pair.second << " registers\n"; } dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() << " item\n"; for (const auto &pair : Invariant) { dbgs() << "LV(REG): RegisterClass: " << TTI.getRegisterClassName(pair.first) << ", " << pair.second << " registers\n"; } }; } } while (false) | ||||
6703 | }do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { { dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() << " item\n"; for (const auto &pair : MaxUsages[i]) { dbgs() << "LV(REG): RegisterClass: " << TTI.getRegisterClassName(pair.first) << ", " << pair.second << " registers\n"; } dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() << " item\n"; for (const auto &pair : Invariant) { dbgs() << "LV(REG): RegisterClass: " << TTI.getRegisterClassName(pair.first) << ", " << pair.second << " registers\n"; } }; } } while (false) | ||||
6704 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { { dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() << " item\n"; for (const auto &pair : MaxUsages[i]) { dbgs() << "LV(REG): RegisterClass: " << TTI.getRegisterClassName(pair.first) << ", " << pair.second << " registers\n"; } dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() << " item\n"; for (const auto &pair : Invariant) { dbgs() << "LV(REG): RegisterClass: " << TTI.getRegisterClassName(pair.first) << ", " << pair.second << " registers\n"; } }; } } while (false); | ||||
6705 | |||||
6706 | RU.LoopInvariantRegs = Invariant; | ||||
6707 | RU.MaxLocalUsers = MaxUsages[i]; | ||||
6708 | RUs[i] = RU; | ||||
6709 | } | ||||
6710 | |||||
6711 | return RUs; | ||||
6712 | } | ||||
6713 | |||||
6714 | bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) { | ||||
6715 | // TODO: Cost model for emulated masked load/store is completely | ||||
6716 | // broken. This hack guides the cost model to use an artificially | ||||
6717 | // high enough value to practically disable vectorization with such | ||||
6718 | // operations, except where previously deployed legality hack allowed | ||||
6719 | // using very low cost values. This is to avoid regressions coming simply | ||||
6720 | // from moving "masked load/store" check from legality to cost model. | ||||
6721 | // Masked Load/Gather emulation was previously never allowed. | ||||
6722 | // Limited number of Masked Store/Scatter emulation was allowed. | ||||
6723 | assert(isPredicatedInst(I) && | ||||
6724 | "Expecting a scalar emulated instruction"); | ||||
6725 | return isa<LoadInst>(I) || | ||||
6726 | (isa<StoreInst>(I) && | ||||
6727 | NumPredStores > NumberOfStoresToPredicate); | ||||
6728 | } | ||||
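 | // Illustrative summary of how the hack above takes effect: when it | ||||
 | // returns true for a predicated memref, getMemInstScalarizationCost() | ||||
 | // further down replaces the computed scalarization cost with the | ||||
 | // artificially large constant 3000000, which in practice keeps the cost | ||||
 | // model from ever picking a plan that needs such emulation. | ||||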
6729 | |||||
6730 | void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { | ||||
6731 | // If we aren't vectorizing the loop, or if we've already collected the | ||||
6732 | // instructions to scalarize, there's nothing to do. Collection may already | ||||
6733 | // have occurred if we have a user-selected VF and are now computing the | ||||
6734 | // expected cost for interleaving. | ||||
6735 | if (VF.isScalar() || VF.isZero() || | ||||
6736 | InstsToScalarize.find(VF) != InstsToScalarize.end()) | ||||
6737 | return; | ||||
6738 | |||||
6739 | // Initialize a mapping for VF in InstsToScalarize. If we find that it's | ||||
6740 | // not profitable to scalarize any instructions, the presence of VF in the | ||||
6741 | // map will indicate that we've analyzed it already. | ||||
6742 | ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; | ||||
6743 | |||||
6744 | // Find all the instructions that are scalar with predication in the loop and | ||||
6745 | // determine if it would be better to not if-convert the blocks they are in. | ||||
6746 | // If so, we also record the instructions to scalarize. | ||||
6747 | for (BasicBlock *BB : TheLoop->blocks()) { | ||||
6748 | if (!blockNeedsPredication(BB)) | ||||
6749 | continue; | ||||
6750 | for (Instruction &I : *BB) | ||||
6751 | if (isScalarWithPredication(&I)) { | ||||
6752 | ScalarCostsTy ScalarCosts; | ||||
6753 | // Do not apply discount logic if hacked cost is needed | ||||
6754 | // for emulated masked memrefs. | ||||
6755 | if (!useEmulatedMaskMemRefHack(&I) && | ||||
6756 | computePredInstDiscount(&I, ScalarCosts, VF) >= 0) | ||||
6757 | ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); | ||||
6758 | // Remember that BB will remain after vectorization. | ||||
6759 | PredicatedBBsAfterVectorization.insert(BB); | ||||
6760 | } | ||||
6761 | } | ||||
6762 | } | ||||
6763 | |||||
6764 | int LoopVectorizationCostModel::computePredInstDiscount( | ||||
6765 | Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { | ||||
6766 | assert(!isUniformAfterVectorization(PredInst, VF) && | ||||
6767 | "Instruction marked uniform-after-vectorization will be predicated"); | ||||
6768 | |||||
6769 | // Initialize the discount to zero, meaning that the scalar version and the | ||||
6770 | // vector version cost the same. | ||||
6771 | InstructionCost Discount = 0; | ||||
6772 | |||||
6773 | // Holds instructions to analyze. The instructions we visit are mapped in | ||||
6774 | // ScalarCosts. Those instructions are the ones that would be scalarized if | ||||
6775 | // we find that the scalar version costs less. | ||||
6776 | SmallVector<Instruction *, 8> Worklist; | ||||
6777 | |||||
6778 | // Returns true if the given instruction can be scalarized. | ||||
6779 | auto canBeScalarized = [&](Instruction *I) -> bool { | ||||
6780 | // We only attempt to scalarize instructions forming a single-use chain | ||||
6781 | // from the original predicated block that would otherwise be vectorized. | ||||
6782 | // Although not strictly necessary, we give up on instructions we know will | ||||
6783 | // already be scalar to avoid traversing chains that are unlikely to be | ||||
6784 | // beneficial. | ||||
6785 | if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || | ||||
6786 | isScalarAfterVectorization(I, VF)) | ||||
6787 | return false; | ||||
6788 | |||||
6789 | // If the instruction is scalar with predication, it will be analyzed | ||||
6790 | // separately. We ignore it within the context of PredInst. | ||||
6791 | if (isScalarWithPredication(I)) | ||||
6792 | return false; | ||||
6793 | |||||
6794 | // If any of the instruction's operands are uniform after vectorization, | ||||
6795 | // the instruction cannot be scalarized. This prevents, for example, a | ||||
6796 | // masked load from being scalarized. | ||||
6797 | // | ||||
6798 | // We assume we will only emit a value for lane zero of an instruction | ||||
6799 | // marked uniform after vectorization, rather than VF identical values. | ||||
6800 | // Thus, if we scalarize an instruction that uses a uniform, we would | ||||
6801 | // create uses of values corresponding to the lanes we aren't emitting code | ||||
6802 | // for. This behavior can be changed by allowing getScalarValue to clone | ||||
6803 | // the lane zero values for uniforms rather than asserting. | ||||
6804 | for (Use &U : I->operands()) | ||||
6805 | if (auto *J = dyn_cast<Instruction>(U.get())) | ||||
6806 | if (isUniformAfterVectorization(J, VF)) | ||||
6807 | return false; | ||||
6808 | |||||
6809 | // Otherwise, we can scalarize the instruction. | ||||
6810 | return true; | ||||
6811 | }; | ||||
6812 | |||||
6813 | // Compute the expected cost discount from scalarizing the entire expression | ||||
6814 | // feeding the predicated instruction. We currently only consider expressions | ||||
6815 | // that are single-use instruction chains. | ||||
6816 | Worklist.push_back(PredInst); | ||||
6817 | while (!Worklist.empty()) { | ||||
6818 | Instruction *I = Worklist.pop_back_val(); | ||||
6819 | |||||
6820 | // If we've already analyzed the instruction, there's nothing to do. | ||||
6821 | if (ScalarCosts.find(I) != ScalarCosts.end()) | ||||
6822 | continue; | ||||
6823 | |||||
6824 | // Compute the cost of the vector instruction. Note that this cost already | ||||
6825 | // includes the scalarization overhead of the predicated instruction. | ||||
6826 | InstructionCost VectorCost = getInstructionCost(I, VF).first; | ||||
6827 | |||||
6828 | // Compute the cost of the scalarized instruction. This cost is the cost of | ||||
6829 | // the instruction as if it wasn't if-converted and instead remained in the | ||||
6830 | // predicated block. We will scale this cost by block probability after | ||||
6831 | // computing the scalarization overhead. | ||||
6832 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | ||||
6833 | InstructionCost ScalarCost = | ||||
6834 | VF.getKnownMinValue() * | ||||
6835 | getInstructionCost(I, ElementCount::getFixed(1)).first; | ||||
6836 | |||||
6837 | // Compute the scalarization overhead of needed insertelement instructions | ||||
6838 | // and phi nodes. | ||||
6839 | if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { | ||||
6840 | ScalarCost += TTI.getScalarizationOverhead( | ||||
6841 | cast<VectorType>(ToVectorTy(I->getType(), VF)), | ||||
6842 | APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); | ||||
6843 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | ||||
6844 | ScalarCost += | ||||
6845 | VF.getKnownMinValue() * | ||||
6846 | TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); | ||||
6847 | } | ||||
6848 | |||||
6849 | // Compute the scalarization overhead of needed extractelement | ||||
6850 | // instructions. For each of the instruction's operands, if the operand can | ||||
6851 | // be scalarized, add it to the worklist; otherwise, account for the | ||||
6852 | // overhead. | ||||
6853 | for (Use &U : I->operands()) | ||||
6854 | if (auto *J = dyn_cast<Instruction>(U.get())) { | ||||
6855 | assert(VectorType::isValidElementType(J->getType()) && | ||||
6856 | "Instruction has non-scalar type"); | ||||
6857 | if (canBeScalarized(J)) | ||||
6858 | Worklist.push_back(J); | ||||
6859 | else if (needsExtract(J, VF)) { | ||||
6860 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | ||||
6861 | ScalarCost += TTI.getScalarizationOverhead( | ||||
6862 | cast<VectorType>(ToVectorTy(J->getType(), VF)), | ||||
6863 | APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); | ||||
6864 | } | ||||
6865 | } | ||||
6866 | |||||
6867 | // Scale the total scalar cost by block probability. | ||||
6868 | ScalarCost /= getReciprocalPredBlockProb(); | ||||
6869 | |||||
6870 | // Compute the discount. A non-negative discount means the vector version | ||||
6871 | // of the instruction costs more, and scalarizing would be beneficial. | ||||
6872 | Discount += VectorCost - ScalarCost; | ||||
6873 | ScalarCosts[I] = ScalarCost; | ||||
6874 | } | ||||
6875 | |||||
6876 | return *Discount.getValue(); | ||||
6877 | } | ||||
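 | // Worked example for the discount above (illustrative numbers only, | ||||
 | // assuming getReciprocalPredBlockProb() == 2, i.e. a 50% chance that a | ||||
 | // predicated block executes): with VF = 4, VectorCost = 8 and a | ||||
 | // per-lane scalar cost of 3, ScalarCost = 4 * 3 = 12, scaled to | ||||
 | // 12 / 2 = 6, so Discount += 8 - 6 = 2; the caller treats the | ||||
 | // non-negative discount as "scalarizing this chain is profitable". | ||||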
6878 | |||||
6879 | LoopVectorizationCostModel::VectorizationCostTy | ||||
6880 | LoopVectorizationCostModel::expectedCost(ElementCount VF) { | ||||
6881 | VectorizationCostTy Cost; | ||||
6882 | |||||
6883 | // For each block. | ||||
6884 | for (BasicBlock *BB : TheLoop->blocks()) { | ||||
6885 | VectorizationCostTy BlockCost; | ||||
6886 | |||||
6887 | // For each instruction in the old loop. | ||||
6888 | for (Instruction &I : BB->instructionsWithoutDebug()) { | ||||
6889 | // Skip ignored values. | ||||
6890 | if (ValuesToIgnore.count(&I) || | ||||
6891 | (VF.isVector() && VecValuesToIgnore.count(&I))) | ||||
6892 | continue; | ||||
6893 | |||||
6894 | VectorizationCostTy C = getInstructionCost(&I, VF); | ||||
6895 | |||||
6896 | // Check if we should override the cost. | ||||
6897 | if (ForceTargetInstructionCost.getNumOccurrences() > 0) | ||||
6898 | C.first = InstructionCost(ForceTargetInstructionCost); | ||||
6899 | |||||
6900 | BlockCost.first += C.first; | ||||
6901 | BlockCost.second |= C.second; | ||||
6902 | LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.firstdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " << VF << " For instruction: " << I << '\n'; } } while (false) | ||||
6903 | << " for VF " << VF << " For instruction: " << Ido { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " << VF << " For instruction: " << I << '\n'; } } while (false) | ||||
6904 | << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " << VF << " For instruction: " << I << '\n'; } } while (false); | ||||
6905 | } | ||||
6906 | |||||
6907 | // If we are vectorizing a predicated block, it will have been | ||||
6908 | // if-converted. This means that the block's instructions (aside from | ||||
6909 | // stores and instructions that may divide by zero) will now be | ||||
6910 | // unconditionally executed. For the scalar case, we may not always execute | ||||
6911 | // the predicated block, if it is an if-else block. Thus, scale the block's | ||||
6912 | // cost by the probability of executing it. blockNeedsPredication from | ||||
6913 | // Legal is used so as to not include all blocks in tail folded loops. | ||||
6914 | if (VF.isScalar() && Legal->blockNeedsPredication(BB)) | ||||
6915 | BlockCost.first /= getReciprocalPredBlockProb(); | ||||
6916 | |||||
6917 | Cost.first += BlockCost.first; | ||||
6918 | Cost.second |= BlockCost.second; | ||||
6919 | } | ||||
6920 | |||||
6921 | return Cost; | ||||
6922 | } | ||||
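 | // Note on the scaling above (assuming getReciprocalPredBlockProb() is | ||||
 | // 2, i.e. a 50% execution probability): in the scalar (VF = 1) estimate | ||||
 | // a predicated block contributes only half of its raw cost, because it | ||||
 | // is not expected to run on every iteration, whereas after | ||||
 | // if-conversion its instructions execute unconditionally. | ||||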
6923 | |||||
6924 | /// Gets Address Access SCEV after verifying that the access pattern | ||||
6925 | /// is loop invariant except the induction variable dependence. | ||||
6926 | /// | ||||
6927 | /// This SCEV can be sent to the Target in order to estimate the address | ||||
6928 | /// calculation cost. | ||||
6929 | static const SCEV *getAddressAccessSCEV( | ||||
6930 | Value *Ptr, | ||||
6931 | LoopVectorizationLegality *Legal, | ||||
6932 | PredicatedScalarEvolution &PSE, | ||||
6933 | const Loop *TheLoop) { | ||||
6934 | |||||
6935 | auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); | ||||
6936 | if (!Gep) | ||||
6937 | return nullptr; | ||||
6938 | |||||
6939 | // We are looking for a gep with all loop invariant indices except for one | ||||
6940 | // which should be an induction variable. | ||||
6941 | auto SE = PSE.getSE(); | ||||
6942 | unsigned NumOperands = Gep->getNumOperands(); | ||||
6943 | for (unsigned i = 1; i < NumOperands; ++i) { | ||||
6944 | Value *Opd = Gep->getOperand(i); | ||||
6945 | if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && | ||||
6946 | !Legal->isInductionVariable(Opd)) | ||||
6947 | return nullptr; | ||||
6948 | } | ||||
6949 | |||||
6950 | // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. | ||||
6951 | return PSE.getSCEV(Ptr); | ||||
6952 | } | ||||
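 | // Hypothetical IR that satisfies the check above (%inv is loop | ||||
 | // invariant, %iv is an induction variable; names are illustrative): | ||||
 | //   %gep = getelementptr inbounds float, float* %inv, i64 %iv | ||||
 | // Every index is either loop invariant or an induction, so the GEP's | ||||
 | // SCEV is returned; anything else yields nullptr. | ||||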
6953 | |||||
6954 | static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { | ||||
6955 | return Legal->hasStride(I->getOperand(0)) || | ||||
6956 | Legal->hasStride(I->getOperand(1)); | ||||
6957 | } | ||||
6958 | |||||
6959 | InstructionCost | ||||
6960 | LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, | ||||
6961 | ElementCount VF) { | ||||
6962 | assert(VF.isVector() && | ||||
6963 | "Scalarization cost of instruction implies vectorization."); | ||||
6964 | if (VF.isScalable()) | ||||
6965 | return InstructionCost::getInvalid(); | ||||
6966 | |||||
6967 | Type *ValTy = getLoadStoreType(I); | ||||
6968 | auto SE = PSE.getSE(); | ||||
6969 | |||||
6970 | unsigned AS = getLoadStoreAddressSpace(I); | ||||
6971 | Value *Ptr = getLoadStorePointerOperand(I); | ||||
6972 | Type *PtrTy = ToVectorTy(Ptr->getType(), VF); | ||||
6973 | |||||
6974 | // Figure out whether the access is strided and get the stride value | ||||
6975 | // if it's known at compile time. | ||||
6976 | const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); | ||||
6977 | |||||
6978 | // Get the cost of the scalar memory instruction and address computation. | ||||
6979 | InstructionCost Cost = | ||||
6980 | VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); | ||||
6981 | |||||
6982 | // Don't pass *I here, since it is scalar but will actually be part of a | ||||
6983 | // vectorized loop where the user of it is a vectorized instruction. | ||||
6984 | const Align Alignment = getLoadStoreAlignment(I); | ||||
6985 | Cost += VF.getKnownMinValue() * | ||||
6986 | TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, | ||||
6987 | AS, TTI::TCK_RecipThroughput); | ||||
6988 | |||||
6989 | // Get the overhead of the extractelement and insertelement instructions | ||||
6990 | // we might create due to scalarization. | ||||
6991 | Cost += getScalarizationOverhead(I, VF); | ||||
6992 | |||||
6993 | // If we have a predicated load/store, it will need extra i1 extracts and | ||||
6994 | // conditional branches, but may not be executed for each vector lane. Scale | ||||
6995 | // the cost by the probability of executing the predicated block. | ||||
6996 | if (isPredicatedInst(I)) { | ||||
6997 | Cost /= getReciprocalPredBlockProb(); | ||||
6998 | |||||
6999 | // Add the cost of an i1 extract and a branch | ||||
7000 | auto *Vec_i1Ty = | ||||
7001 | VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); | ||||
7002 | Cost += TTI.getScalarizationOverhead( | ||||
7003 | Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), | ||||
7004 | /*Insert=*/false, /*Extract=*/true); | ||||
7005 | Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); | ||||
7006 | |||||
7007 | if (useEmulatedMaskMemRefHack(I)) | ||||
7008 | // Artificially setting to a high enough value to practically disable | ||||
7009 | // vectorization with such operations. | ||||
7010 | Cost = 3000000; | ||||
7011 | } | ||||
7012 | |||||
7013 | return Cost; | ||||
7014 | } | ||||
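 | // Rough shape of the result above, with VF = N (illustrative): | ||||
 | //   Cost = N * AddressComputation + N * ScalarMemOp + InsExtOverhead | ||||
 | // and, for a predicated access, additionally | ||||
 | //   Cost = Cost / getReciprocalPredBlockProb() + i1Extracts + Branch | ||||
 | // unless useEmulatedMaskMemRefHack() fires, in which case the fixed | ||||
 | // 3000000 is used instead. | ||||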
7015 | |||||
7016 | InstructionCost | ||||
7017 | LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, | ||||
7018 | ElementCount VF) { | ||||
7019 | Type *ValTy = getLoadStoreType(I); | ||||
7020 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | ||||
7021 | Value *Ptr = getLoadStorePointerOperand(I); | ||||
7022 | unsigned AS = getLoadStoreAddressSpace(I); | ||||
7023 | int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); | ||||
7024 | enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | ||||
7025 | |||||
7026 | assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && | ||||
7027 | "Stride should be 1 or -1 for consecutive memory access"); | ||||
7028 | const Align Alignment = getLoadStoreAlignment(I); | ||||
7029 | InstructionCost Cost = 0; | ||||
7030 | if (Legal->isMaskRequired(I)) | ||||
7031 | Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, | ||||
7032 | CostKind); | ||||
7033 | else | ||||
7034 | Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, | ||||
7035 | CostKind, I); | ||||
7036 | |||||
7037 | bool Reverse = ConsecutiveStride < 0; | ||||
7038 | if (Reverse) | ||||
7039 | Cost += | ||||
7040 | TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); | ||||
7041 | return Cost; | ||||
7042 | } | ||||
7043 | |||||
7044 | InstructionCost | ||||
7045 | LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, | ||||
7046 | ElementCount VF) { | ||||
7047 | assert(Legal->isUniformMemOp(*I)); | ||||
7048 | |||||
7049 | Type *ValTy = getLoadStoreType(I); | ||||
7050 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | ||||
7051 | const Align Alignment = getLoadStoreAlignment(I); | ||||
7052 | unsigned AS = getLoadStoreAddressSpace(I); | ||||
7053 | enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | ||||
7054 | if (isa<LoadInst>(I)) { | ||||
7055 | return TTI.getAddressComputationCost(ValTy) + | ||||
7056 | TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, | ||||
7057 | CostKind) + | ||||
7058 | TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); | ||||
7059 | } | ||||
7060 | StoreInst *SI = cast<StoreInst>(I); | ||||
7061 | |||||
7062 | bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); | ||||
7063 | return TTI.getAddressComputationCost(ValTy) + | ||||
7064 | TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, | ||||
7065 | CostKind) + | ||||
7066 | (isLoopInvariantStoreValue | ||||
7067 | ? 0 | ||||
7068 | : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, | ||||
7069 | VF.getKnownMinValue() - 1)); | ||||
7070 | } | ||||
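 | // For a uniform load the estimate above is therefore roughly | ||||
 | //   AddressComputation + ScalarLoad + BroadcastShuffle, | ||||
 | // and for a uniform store | ||||
 | //   AddressComputation + ScalarStore + | ||||
 | //   (loop-invariant stored value ? 0 : extract of the last lane). | ||||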
7071 | |||||
7072 | InstructionCost | ||||
7073 | LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, | ||||
7074 | ElementCount VF) { | ||||
7075 | Type *ValTy = getLoadStoreType(I); | ||||
7076 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | ||||
7077 | const Align Alignment = getLoadStoreAlignment(I); | ||||
7078 | const Value *Ptr = getLoadStorePointerOperand(I); | ||||
7079 | |||||
7080 | return TTI.getAddressComputationCost(VectorTy) + | ||||
7081 | TTI.getGatherScatterOpCost( | ||||
7082 | I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, | ||||
7083 | TargetTransformInfo::TCK_RecipThroughput, I); | ||||
7084 | } | ||||
7085 | |||||
7086 | InstructionCost | ||||
7087 | LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, | ||||
7088 | ElementCount VF) { | ||||
7089 | // TODO: Once we have support for interleaving with scalable vectors | ||||
7090 | // we can calculate the cost properly here. | ||||
7091 | if (VF.isScalable()) | ||||
7092 | return InstructionCost::getInvalid(); | ||||
7093 | |||||
7094 | Type *ValTy = getLoadStoreType(I); | ||||
7095 | auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); | ||||
7096 | unsigned AS = getLoadStoreAddressSpace(I); | ||||
7097 | |||||
7098 | auto Group = getInterleavedAccessGroup(I); | ||||
7099 | assert(Group && "Fail to get an interleaved access group."); | ||||
7100 | |||||
7101 | unsigned InterleaveFactor = Group->getFactor(); | ||||
7102 | auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); | ||||
7103 | |||||
7104 | // Holds the indices of existing members in an interleaved load group. | ||||
7105 | // An interleaved store group doesn't need this as it doesn't allow gaps. | ||||
7106 | SmallVector<unsigned, 4> Indices; | ||||
7107 | if (isa<LoadInst>(I)) { | ||||
7108 | for (unsigned i = 0; i < InterleaveFactor; i++) | ||||
7109 | if (Group->getMember(i)) | ||||
7110 | Indices.push_back(i); | ||||
7111 | } | ||||
7112 | |||||
7113 | // Calculate the cost of the whole interleaved group. | ||||
7114 | bool UseMaskForGaps = | ||||
7115 | Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); | ||||
7116 | InstructionCost Cost = TTI.getInterleavedMemoryOpCost( | ||||
7117 | I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), | ||||
7118 | AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); | ||||
7119 | |||||
7120 | if (Group->isReverse()) { | ||||
7121 | // TODO: Add support for reversed masked interleaved access. | ||||
7122 | assert(!Legal->isMaskRequired(I) && | ||||
7123 | "Reverse masked interleaved access not supported."); | ||||
7124 | Cost += | ||||
7125 | Group->getNumMembers() * | ||||
7126 | TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); | ||||
7127 | } | ||||
7128 | return Cost; | ||||
7129 | } | ||||
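 | // Illustrative case for the computation above: a factor-2 load group | ||||
 | // reading A[2*i] and A[2*i+1] with VF = 4 is priced as one wide access | ||||
 | // of an 8-element vector (WideVecTy) plus the target's de-interleaving | ||||
 | // cost, via getInterleavedMemoryOpCost(); a reversed group additionally | ||||
 | // pays one SK_Reverse shuffle per member. | ||||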
7130 | |||||
7131 | InstructionCost LoopVectorizationCostModel::getReductionPatternCost( | ||||
7132 | Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { | ||||
7133 | // Early exit for no inloop reductions | ||||
7134 | if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) | ||||
7135 | return InstructionCost::getInvalid(); | ||||
7136 | auto *VectorTy = cast<VectorType>(Ty); | ||||
7137 | |||||
7138 | // We are looking for one of the following patterns, finding the minimal acceptable cost: | ||||
7139 | // reduce(mul(ext(A), ext(B))) or | ||||
7140 | // reduce(mul(A, B)) or | ||||
7141 | // reduce(ext(A)) or | ||||
7142 | // reduce(A). | ||||
7143 | // The basic idea is that we walk down the tree to do that, finding the root | ||||
7144 | // reduction instruction in InLoopReductionImmediateChains. From there we find | ||||
7145 | // the pattern of mul/ext and test the cost of the entire pattern vs the cost | ||||
7146 | // of the components. If the reduction cost is lower then we return it for the | ||||
7147 | // reduction instruction and 0 for the other instructions in the pattern. If | ||||
7148 | it is not, we return an invalid cost specifying that the original cost method | ||||
7149 | // should be used. | ||||
7150 | Instruction *RetI = I; | ||||
7151 | if ((RetI->getOpcode() == Instruction::SExt || | ||||
7152 | RetI->getOpcode() == Instruction::ZExt)) { | ||||
7153 | if (!RetI->hasOneUser()) | ||||
7154 | return InstructionCost::getInvalid(); | ||||
7155 | RetI = RetI->user_back(); | ||||
7156 | } | ||||
7157 | if (RetI->getOpcode() == Instruction::Mul && | ||||
7158 | RetI->user_back()->getOpcode() == Instruction::Add) { | ||||
7159 | if (!RetI->hasOneUser()) | ||||
7160 | return InstructionCost::getInvalid(); | ||||
7161 | RetI = RetI->user_back(); | ||||
7162 | } | ||||
7163 | |||||
7164 | // Test if the found instruction is a reduction, and if not return an invalid | ||||
7165 | // cost specifying the parent to use the original cost modelling. | ||||
7166 | if (!InLoopReductionImmediateChains.count(RetI)) | ||||
7167 | return InstructionCost::getInvalid(); | ||||
7168 | |||||
7169 | // Find the reduction this chain is a part of and calculate the basic cost of | ||||
7170 | // the reduction on its own. | ||||
7171 | Instruction *LastChain = InLoopReductionImmediateChains[RetI]; | ||||
7172 | Instruction *ReductionPhi = LastChain; | ||||
7173 | while (!isa<PHINode>(ReductionPhi)) | ||||
7174 | ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; | ||||
7175 | |||||
7176 | const RecurrenceDescriptor &RdxDesc = | ||||
7177 | Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; | ||||
7178 | InstructionCost BaseCost = TTI.getArithmeticReductionCost( | ||||
7179 | RdxDesc.getOpcode(), VectorTy, false, CostKind); | ||||
7180 | |||||
7181 | // Get the operand that was not the reduction chain and match it to one of the | ||||
7182 | // patterns, returning the better cost if it is found. | ||||
7183 | Instruction *RedOp = RetI->getOperand(1) == LastChain | ||||
7184 | ? dyn_cast<Instruction>(RetI->getOperand(0)) | ||||
7185 | : dyn_cast<Instruction>(RetI->getOperand(1)); | ||||
7186 | |||||
7187 | VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); | ||||
7188 | |||||
7189 | if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && | ||||
7190 | !TheLoop->isLoopInvariant(RedOp)) { | ||||
7191 | bool IsUnsigned = isa<ZExtInst>(RedOp); | ||||
7192 | auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); | ||||
7193 | InstructionCost RedCost = TTI.getExtendedAddReductionCost( | ||||
7194 | /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, | ||||
7195 | CostKind); | ||||
7196 | |||||
7197 | InstructionCost ExtCost = | ||||
7198 | TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, | ||||
7199 | TTI::CastContextHint::None, CostKind, RedOp); | ||||
7200 | if (RedCost.isValid() && RedCost < BaseCost + ExtCost) | ||||
7201 | return I == RetI ? *RedCost.getValue() : 0; | ||||
7202 | } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { | ||||
7203 | Instruction *Mul = RedOp; | ||||
7204 | Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); | ||||
7205 | Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); | ||||
7206 | if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && | ||||
7207 | Op0->getOpcode() == Op1->getOpcode() && | ||||
7208 | Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && | ||||
7209 | !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { | ||||
7210 | bool IsUnsigned = isa<ZExtInst>(Op0); | ||||
7211 | auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); | ||||
7212 | // reduce(mul(ext, ext)) | ||||
7213 | InstructionCost ExtCost = | ||||
7214 | TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, | ||||
7215 | TTI::CastContextHint::None, CostKind, Op0); | ||||
7216 | InstructionCost MulCost = | ||||
7217 | TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); | ||||
7218 | |||||
7219 | InstructionCost RedCost = TTI.getExtendedAddReductionCost( | ||||
7220 | /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, | ||||
7221 | CostKind); | ||||
7222 | |||||
7223 | if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) | ||||
7224 | return I == RetI ? *RedCost.getValue() : 0; | ||||
7225 | } else { | ||||
7226 | InstructionCost MulCost = | ||||
7227 | TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); | ||||
7228 | |||||
7229 | InstructionCost RedCost = TTI.getExtendedAddReductionCost( | ||||
7230 | /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, | ||||
7231 | CostKind); | ||||
7232 | |||||
7233 | if (RedCost.isValid() && RedCost < MulCost + BaseCost) | ||||
7234 | return I == RetI ? *RedCost.getValue() : 0; | ||||
7235 | } | ||||
7236 | } | ||||
7237 | |||||
7238 | return I == RetI ? BaseCost : InstructionCost::getInvalid(); | ||||
7239 | } | ||||
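 | // Typical source pattern the matcher above is after (illustrative): | ||||
 | //   for (...) sum += (int)a[i] * (int)b[i];   // narrow i8/i16 inputs | ||||
 | // i.e. reduce.add(mul(ext(A), ext(B))), which some targets can lower to | ||||
 | // a single multiply-accumulate reduction; if | ||||
 | // getExtendedAddReductionCost() reports that as cheaper than the sum of | ||||
 | // the separate ext/mul/reduce costs, the cheaper cost is returned. | ||||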
7240 | |||||
7241 | InstructionCost | ||||
7242 | LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, | ||||
7243 | ElementCount VF) { | ||||
7244 | // Calculate scalar cost only. Vectorization cost should be ready at this | ||||
7245 | // moment. | ||||
7246 | if (VF.isScalar()) { | ||||
7247 | Type *ValTy = getLoadStoreType(I); | ||||
7248 | const Align Alignment = getLoadStoreAlignment(I); | ||||
7249 | unsigned AS = getLoadStoreAddressSpace(I); | ||||
7250 | |||||
7251 | return TTI.getAddressComputationCost(ValTy) + | ||||
7252 | TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, | ||||
7253 | TTI::TCK_RecipThroughput, I); | ||||
7254 | } | ||||
7255 | return getWideningCost(I, VF); | ||||
7256 | } | ||||
7257 | |||||
7258 | LoopVectorizationCostModel::VectorizationCostTy | ||||
7259 | LoopVectorizationCostModel::getInstructionCost(Instruction *I, | ||||
7260 | ElementCount VF) { | ||||
7261 | // If we know that this instruction will remain uniform, check the cost of | ||||
7262 | // the scalar version. | ||||
7263 | if (isUniformAfterVectorization(I, VF)) | ||||
7264 | VF = ElementCount::getFixed(1); | ||||
7265 | |||||
7266 | if (VF.isVector() && isProfitableToScalarize(I, VF)) | ||||
7267 | return VectorizationCostTy(InstsToScalarize[VF][I], false); | ||||
7268 | |||||
7269 | // Forced scalars do not have any scalarization overhead. | ||||
7270 | auto ForcedScalar = ForcedScalars.find(VF); | ||||
7271 | if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { | ||||
7272 | auto InstSet = ForcedScalar->second; | ||||
7273 | if (InstSet.count(I)) | ||||
7274 | return VectorizationCostTy( | ||||
7275 | (getInstructionCost(I, ElementCount::getFixed(1)).first * | ||||
7276 | VF.getKnownMinValue()), | ||||
7277 | false); | ||||
7278 | } | ||||
7279 | |||||
7280 | Type *VectorTy; | ||||
7281 | InstructionCost C = getInstructionCost(I, VF, VectorTy); | ||||
7282 | |||||
7283 | bool TypeNotScalarized = | ||||
7284 | VF.isVector() && VectorTy->isVectorTy() && | ||||
7285 | TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); | ||||
7286 | return VectorizationCostTy(C, TypeNotScalarized); | ||||
7287 | } | ||||
7288 | |||||
7289 | InstructionCost | ||||
7290 | LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, | ||||
7291 | ElementCount VF) const { | ||||
7292 | |||||
7293 | if (VF.isScalable()) | ||||
7294 | return InstructionCost::getInvalid(); | ||||
7295 | |||||
7296 | if (VF.isScalar()) | ||||
7297 | return 0; | ||||
7298 | |||||
7299 | InstructionCost Cost = 0; | ||||
7300 | Type *RetTy = ToVectorTy(I->getType(), VF); | ||||
7301 | if (!RetTy->isVoidTy() && | ||||
7302 | (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) | ||||
7303 | Cost += TTI.getScalarizationOverhead( | ||||
7304 | cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), | ||||
7305 | true, false); | ||||
7306 | |||||
7307 | // Some targets keep addresses scalar. | ||||
7308 | if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) | ||||
7309 | return Cost; | ||||
7310 | |||||
7311 | // Some targets support efficient element stores. | ||||
7312 | if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) | ||||
7313 | return Cost; | ||||
7314 | |||||
7315 | // Collect operands to consider. | ||||
7316 | CallInst *CI = dyn_cast<CallInst>(I); | ||||
7317 | Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); | ||||
7318 | |||||
7319 | // Skip operands that do not require extraction/scalarization and do not incur | ||||
7320 | // any overhead. | ||||
7321 | SmallVector<Type *> Tys; | ||||
7322 | for (auto *V : filterExtractingOperands(Ops, VF)) | ||||
7323 | Tys.push_back(MaybeVectorizeType(V->getType(), VF)); | ||||
7324 | return Cost + TTI.getOperandsScalarizationOverhead( | ||||
7325 | filterExtractingOperands(Ops, VF), Tys); | ||||
7326 | } | ||||
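 | // Example of what the overhead above counts (illustrative): scalarizing | ||||
 | // a 4-lane instruction with a vector result and two vector operands | ||||
 | // needs up to 4 insertelements to rebuild the result and up to 8 | ||||
 | // extractelements to feed the scalar copies, priced through | ||||
 | // TTI.getScalarizationOverhead() / getOperandsScalarizationOverhead(). | ||||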
7327 | |||||
7328 | void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { | ||||
7329 | if (VF.isScalar()) | ||||
7330 | return; | ||||
7331 | NumPredStores = 0; | ||||
7332 | for (BasicBlock *BB : TheLoop->blocks()) { | ||||
7333 | // For each instruction in the old loop. | ||||
7334 | for (Instruction &I : *BB) { | ||||
7335 | Value *Ptr = getLoadStorePointerOperand(&I); | ||||
7336 | if (!Ptr) | ||||
7337 | continue; | ||||
7338 | |||||
7339 | // TODO: We should generate better code and update the cost model for | ||||
7340 | // predicated uniform stores. Today they are treated as any other | ||||
7341 | // predicated store (see added test cases in | ||||
7342 | // invariant-store-vectorization.ll). | ||||
7343 | if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) | ||||
7344 | NumPredStores++; | ||||
7345 | |||||
7346 | if (Legal->isUniformMemOp(I)) { | ||||
7347 | // TODO: Avoid replicating loads and stores instead of | ||||
7348 | // relying on instcombine to remove them. | ||||
7349 | // Load: Scalar load + broadcast | ||||
7350 | // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract | ||||
7351 | InstructionCost Cost = getUniformMemOpCost(&I, VF); | ||||
7352 | setWideningDecision(&I, VF, CM_Scalarize, Cost); | ||||
7353 | continue; | ||||
7354 | } | ||||
7355 | |||||
7356 | // We assume that widening is the best solution when possible. | ||||
7357 | if (memoryInstructionCanBeWidened(&I, VF)) { | ||||
7358 | InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); | ||||
7359 | int ConsecutiveStride = | ||||
7360 | Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); | ||||
7361 | assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && | ||||
7362 | "Expected consecutive stride."); | ||||
7363 | InstWidening Decision = | ||||
7364 | ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; | ||||
7365 | setWideningDecision(&I, VF, Decision, Cost); | ||||
7366 | continue; | ||||
7367 | } | ||||
7368 | |||||
7369 | // Choose between Interleaving, Gather/Scatter or Scalarization. | ||||
7370 | InstructionCost InterleaveCost = InstructionCost::getInvalid(); | ||||
7371 | unsigned NumAccesses = 1; | ||||
7372 | if (isAccessInterleaved(&I)) { | ||||
7373 | auto Group = getInterleavedAccessGroup(&I); | ||||
7374 | assert(Group && "Fail to get an interleaved access group."); | ||||
7375 | |||||
7376 | // Make one decision for the whole group. | ||||
7377 | if (getWideningDecision(&I, VF) != CM_Unknown) | ||||
7378 | continue; | ||||
7379 | |||||
7380 | NumAccesses = Group->getNumMembers(); | ||||
7381 | if (interleavedAccessCanBeWidened(&I, VF)) | ||||
7382 | InterleaveCost = getInterleaveGroupCost(&I, VF); | ||||
7383 | } | ||||
7384 | |||||
7385 | InstructionCost GatherScatterCost = | ||||
7386 | isLegalGatherOrScatter(&I) | ||||
7387 | ? getGatherScatterCost(&I, VF) * NumAccesses | ||||
7388 | : InstructionCost::getInvalid(); | ||||
7389 | |||||
7390 | InstructionCost ScalarizationCost = | ||||
7391 | getMemInstScalarizationCost(&I, VF) * NumAccesses; | ||||
7392 | |||||
7393 | // Choose better solution for the current VF, | ||||
7394 | // write down this decision and use it during vectorization. | ||||
7395 | InstructionCost Cost; | ||||
7396 | InstWidening Decision; | ||||
7397 | if (InterleaveCost <= GatherScatterCost && | ||||
7398 | InterleaveCost < ScalarizationCost) { | ||||
7399 | Decision = CM_Interleave; | ||||
7400 | Cost = InterleaveCost; | ||||
7401 | } else if (GatherScatterCost < ScalarizationCost) { | ||||
7402 | Decision = CM_GatherScatter; | ||||
7403 | Cost = GatherScatterCost; | ||||
7404 | } else { | ||||
7405 | assert(!VF.isScalable() && | ||||
7406 | "We cannot yet scalarise for scalable vectors"); | ||||
7407 | Decision = CM_Scalarize; | ||||
7408 | Cost = ScalarizationCost; | ||||
7409 | } | ||||
7410 | // If the instruction belongs to an interleave group, the whole group | ||||
7411 | // receives the same decision. The whole group receives the cost, but | ||||
7412 | // the cost will actually be assigned to one instruction. | ||||
7413 | if (auto Group = getInterleavedAccessGroup(&I)) | ||||
7414 | setWideningDecision(Group, VF, Decision, Cost); | ||||
7415 | else | ||||
7416 | setWideningDecision(&I, VF, Decision, Cost); | ||||
7417 | } | ||||
7418 | } | ||||
7419 | |||||
7420 | // Make sure that any load of address and any other address computation | ||||
7421 | // remains scalar unless there is gather/scatter support. This avoids | ||||
7422 | // inevitable extracts into address registers, and also has the benefit of | ||||
7423 | // activating LSR more, since that pass can't optimize vectorized | ||||
7424 | // addresses. | ||||
7425 | if (TTI.prefersVectorizedAddressing()) | ||||
7426 | return; | ||||
7427 | |||||
7428 | // Start with all scalar pointer uses. | ||||
7429 | SmallPtrSet<Instruction *, 8> AddrDefs; | ||||
7430 | for (BasicBlock *BB : TheLoop->blocks()) | ||||
7431 | for (Instruction &I : *BB) { | ||||
7432 | Instruction *PtrDef = | ||||
7433 | dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); | ||||
7434 | if (PtrDef && TheLoop->contains(PtrDef) && | ||||
7435 | getWideningDecision(&I, VF) != CM_GatherScatter) | ||||
7436 | AddrDefs.insert(PtrDef); | ||||
7437 | } | ||||
7438 | |||||
7439 | // Add all instructions used to generate the addresses. | ||||
7440 | SmallVector<Instruction *, 4> Worklist; | ||||
7441 | append_range(Worklist, AddrDefs); | ||||
7442 | while (!Worklist.empty()) { | ||||
7443 | Instruction *I = Worklist.pop_back_val(); | ||||
7444 | for (auto &Op : I->operands()) | ||||
7445 | if (auto *InstOp = dyn_cast<Instruction>(Op)) | ||||
7446 | if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && | ||||
7447 | AddrDefs.insert(InstOp).second) | ||||
7448 | Worklist.push_back(InstOp); | ||||
7449 | } | ||||
7450 | |||||
7451 | for (auto *I : AddrDefs) { | ||||
7452 | if (isa<LoadInst>(I)) { | ||||
7453 | // Setting the desired widening decision should ideally be handled by | ||||
7454 | // cost functions, but since this involves the task of finding out | ||||
7455 | // if the loaded register is involved in an address computation, it is | ||||
7456 | // instead changed here when we know this is the case. | ||||
7457 | InstWidening Decision = getWideningDecision(I, VF); | ||||
7458 | if (Decision == CM_Widen || Decision == CM_Widen_Reverse) | ||||
7459 | // Scalarize a widened load of address. | ||||
7460 | setWideningDecision( | ||||
7461 | I, VF, CM_Scalarize, | ||||
7462 | (VF.getKnownMinValue() * | ||||
7463 | getMemoryInstructionCost(I, ElementCount::getFixed(1)))); | ||||
7464 | else if (auto Group = getInterleavedAccessGroup(I)) { | ||||
7465 | // Scalarize an interleave group of address loads. | ||||
7466 | for (unsigned I = 0; I < Group->getFactor(); ++I) { | ||||
7467 | if (Instruction *Member = Group->getMember(I)) | ||||
7468 | setWideningDecision( | ||||
7469 | Member, VF, CM_Scalarize, | ||||
7470 | (VF.getKnownMinValue() * | ||||
7471 | getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); | ||||
7472 | } | ||||
7473 | } | ||||
7474 | } else | ||||
7475 | // Make sure I gets scalarized and a cost estimate without | ||||
7476 | // scalarization overhead. | ||||
7477 | ForcedScalars[VF].insert(I); | ||||
7478 | } | ||||
7479 | } | ||||
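 | // Net effect of the pass above over each memory instruction: uniform | ||||
 | // accesses are scalarized, consecutive accesses are widened (possibly | ||||
 | // reversed), and otherwise the cheapest of interleaving, gather/scatter | ||||
 | // and scalarization wins; a final sweep keeps address computations | ||||
 | // scalar on targets that do not prefer vectorized addressing. | ||||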
7480 | |||||
7481 | InstructionCost | ||||
7482 | LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, | ||||
7483 | Type *&VectorTy) { | ||||
7484 | Type *RetTy = I->getType(); | ||||
7485 | if (canTruncateToMinimalBitwidth(I, VF)) | ||||
7486 | RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); | ||||
7487 | auto SE = PSE.getSE(); | ||||
7488 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | ||||
7489 | |||||
7490 | auto hasSingleCopyAfterVectorization = [this](Instruction *I, | ||||
7491 | ElementCount VF) -> bool { | ||||
7492 | if (VF.isScalar()) | ||||
7493 | return true; | ||||
7494 | |||||
7495 | auto Scalarized = InstsToScalarize.find(VF); | ||||
7496 | assert(Scalarized != InstsToScalarize.end() && | ||||
7497 | "VF not yet analyzed for scalarization profitability"); | ||||
7498 | return !Scalarized->second.count(I) && | ||||
7499 | llvm::all_of(I->users(), [&](User *U) { | ||||
7500 | auto *UI = cast<Instruction>(U); | ||||
7501 | return !Scalarized->second.count(UI); | ||||
7502 | }); | ||||
7503 | }; | ||||
7504 | (void) hasSingleCopyAfterVectorization; | ||||
7505 | |||||
7506 | if (isScalarAfterVectorization(I, VF)) { | ||||
7507 | // With the exception of GEPs and PHIs, after scalarization there should | ||||
7508 | // only be one copy of the instruction generated in the loop. This is | ||||
7509 | // because the VF is either 1, or any instructions that need scalarizing | ||||
7510 | have already been dealt with by the time we get here. As a result, | ||||
7511 | // it means we don't have to multiply the instruction cost by VF. | ||||
7512 | assert(I->getOpcode() == Instruction::GetElementPtr || | ||||
7513 | I->getOpcode() == Instruction::PHI || | ||||
7514 | (I->getOpcode() == Instruction::BitCast && | ||||
7515 | I->getType()->isPointerTy()) || | ||||
7516 | hasSingleCopyAfterVectorization(I, VF)); | ||||
7517 | VectorTy = RetTy; | ||||
7518 | } else | ||||
7519 | VectorTy = ToVectorTy(RetTy, VF); | ||||
7520 | |||||
7521 | // TODO: We need to estimate the cost of intrinsic calls. | ||||
7522 | switch (I->getOpcode()) { | ||||
7523 | case Instruction::GetElementPtr: | ||||
7524 | // We mark this instruction as zero-cost because the cost of GEPs in | ||||
7525 | // vectorized code depends on whether the corresponding memory instruction | ||||
7526 | // is scalarized or not. Therefore, we handle GEPs with the memory | ||||
7527 | // instruction cost. | ||||
7528 | return 0; | ||||
7529 | case Instruction::Br: { | ||||
7530 | // In cases of scalarized and predicated instructions, there will be VF | ||||
7531 | // predicated blocks in the vectorized loop. Each branch around these | ||||
7532 | // blocks requires also an extract of its vector compare i1 element. | ||||
7533 | bool ScalarPredicatedBB = false; | ||||
7534 | BranchInst *BI = cast<BranchInst>(I); | ||||
7535 | if (VF.isVector() && BI->isConditional() && | ||||
7536 | (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || | ||||
7537 | PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) | ||||
7538 | ScalarPredicatedBB = true; | ||||
7539 | |||||
7540 | if (ScalarPredicatedBB) { | ||||
7541 | // Return cost for branches around scalarized and predicated blocks. | ||||
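// (Illustrative example, assuming a fixed VF of 4: the cost computed below is
// the overhead of extracting the four i1 lanes of the compare from its mask
// vector, plus four scalar branch costs, one per predicated block.)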
7542 | assert(!VF.isScalable() && "scalable vectors not yet supported."); | ||||
7543 | auto *Vec_i1Ty = | ||||
7544 | VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); | ||||
7545 | return (TTI.getScalarizationOverhead( | ||||
7546 | Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), | ||||
7547 | false, true) + | ||||
7548 | (TTI.getCFInstrCost(Instruction::Br, CostKind) * | ||||
7549 | VF.getKnownMinValue())); | ||||
7550 | } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) | ||||
7551 | // The back-edge branch will remain, as will all scalar branches. | ||||
7552 | return TTI.getCFInstrCost(Instruction::Br, CostKind); | ||||
7553 | else | ||||
7554 | // This branch will be eliminated by if-conversion. | ||||
7555 | return 0; | ||||
7556 | // Note: We currently assume zero cost for an unconditional branch inside | ||||
7557 | // a predicated block since it will become a fall-through, although we | ||||
7558 | // may decide in the future to call TTI for all branches. | ||||
7559 | } | ||||
7560 | case Instruction::PHI: { | ||||
7561 | auto *Phi = cast<PHINode>(I); | ||||
7562 | |||||
7563 | // First-order recurrences are replaced by vector shuffles inside the loop. | ||||
7564 | // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. | ||||
7565 | if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) | ||||
7566 | return TTI.getShuffleCost( | ||||
7567 | TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), | ||||
7568 | None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); | ||||
7569 | |||||
7570 | // Phi nodes in non-header blocks (not inductions, reductions, etc.) are | ||||
7571 | // converted into select instructions. We require N - 1 selects per phi | ||||
7572 | // node, where N is the number of incoming values. | ||||
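// (Illustrative example: a phi merging values from three predecessors would be
// lowered to two vector selects, so its cost is 2 * the select cost at VF.)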
7573 | if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) | ||||
7574 | return (Phi->getNumIncomingValues() - 1) * | ||||
7575 | TTI.getCmpSelInstrCost( | ||||
7576 | Instruction::Select, ToVectorTy(Phi->getType(), VF), | ||||
7577 | ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), | ||||
7578 | CmpInst::BAD_ICMP_PREDICATE, CostKind); | ||||
7579 | |||||
7580 | return TTI.getCFInstrCost(Instruction::PHI, CostKind); | ||||
7581 | } | ||||
7582 | case Instruction::UDiv: | ||||
7583 | case Instruction::SDiv: | ||||
7584 | case Instruction::URem: | ||||
7585 | case Instruction::SRem: | ||||
7586 | // If we have a predicated instruction, it may not be executed for each | ||||
7587 | // vector lane. Get the scalarization cost and scale this amount by the | ||||
7588 | // probability of executing the predicated block. If the instruction is not | ||||
7589 | // predicated, we fall through to the next case. | ||||
7590 | if (VF.isVector() && isScalarWithPredication(I)) { | ||||
7591 | InstructionCost Cost = 0; | ||||
7592 | |||||
7593 | // These instructions have a non-void type, so account for the phi nodes | ||||
7594 | // that we will create. This cost is likely to be zero. The phi node | ||||
7595 | // cost, if any, should be scaled by the block probability because it | ||||
7596 | // models a copy at the end of each predicated block. | ||||
7597 | Cost += VF.getKnownMinValue() * | ||||
7598 | TTI.getCFInstrCost(Instruction::PHI, CostKind); | ||||
7599 | |||||
7600 | // The cost of the non-predicated instruction. | ||||
7601 | Cost += VF.getKnownMinValue() * | ||||
7602 | TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); | ||||
7603 | |||||
7604 | // The cost of insertelement and extractelement instructions needed for | ||||
7605 | // scalarization. | ||||
7606 | Cost += getScalarizationOverhead(I, VF); | ||||
7607 | |||||
7608 | // Scale the cost by the probability of executing the predicated blocks. | ||||
7609 | // This assumes the predicated block for each vector lane is equally | ||||
7610 | // likely. | ||||
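// (Illustrative example, assuming getReciprocalPredBlockProb() models a 50%
// chance and returns 2: a summed scalarization cost of 20 would be reported
// as 10 here.)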
7611 | return Cost / getReciprocalPredBlockProb(); | ||||
7612 | } | ||||
7613 | LLVM_FALLTHROUGH; | ||||
7614 | case Instruction::Add: | ||||
7615 | case Instruction::FAdd: | ||||
7616 | case Instruction::Sub: | ||||
7617 | case Instruction::FSub: | ||||
7618 | case Instruction::Mul: | ||||
7619 | case Instruction::FMul: | ||||
7620 | case Instruction::FDiv: | ||||
7621 | case Instruction::FRem: | ||||
7622 | case Instruction::Shl: | ||||
7623 | case Instruction::LShr: | ||||
7624 | case Instruction::AShr: | ||||
7625 | case Instruction::And: | ||||
7626 | case Instruction::Or: | ||||
7627 | case Instruction::Xor: { | ||||
7628 | // Since we will replace the stride by 1 the multiplication should go away. | ||||
7629 | if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) | ||||
7630 | return 0; | ||||
7631 | |||||
7632 | // Detect reduction patterns | ||||
7633 | InstructionCost RedCost; | ||||
7634 | if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) | ||||
7635 | .isValid()) | ||||
7636 | return RedCost; | ||||
7637 | |||||
7638 | // Certain instructions can be cheaper to vectorize if they have a constant | ||||
7639 | // second vector operand. One example of this is shifts on x86. | ||||
7640 | Value *Op2 = I->getOperand(1); | ||||
7641 | TargetTransformInfo::OperandValueProperties Op2VP; | ||||
7642 | TargetTransformInfo::OperandValueKind Op2VK = | ||||
7643 | TTI.getOperandInfo(Op2, Op2VP); | ||||
7644 | if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) | ||||
7645 | Op2VK = TargetTransformInfo::OK_UniformValue; | ||||
7646 | |||||
7647 | SmallVector<const Value *, 4> Operands(I->operand_values()); | ||||
7648 | return TTI.getArithmeticInstrCost( | ||||
7649 | I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, | ||||
7650 | Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); | ||||
7651 | } | ||||
7652 | case Instruction::FNeg: { | ||||
7653 | return TTI.getArithmeticInstrCost( | ||||
7654 | I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, | ||||
7655 | TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, | ||||
7656 | TargetTransformInfo::OP_None, I->getOperand(0), I); | ||||
7657 | } | ||||
7658 | case Instruction::Select: { | ||||
7659 | SelectInst *SI = cast<SelectInst>(I); | ||||
7660 | const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); | ||||
7661 | bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); | ||||
7662 | |||||
7663 | const Value *Op0, *Op1; | ||||
7664 | using namespace llvm::PatternMatch; | ||||
7665 | if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || | ||||
7666 | match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { | ||||
7667 | // select x, y, false --> x & y | ||||
7668 | // select x, true, y --> x | y | ||||
7669 | TTI::OperandValueProperties Op1VP = TTI::OP_None; | ||||
7670 | TTI::OperandValueProperties Op2VP = TTI::OP_None; | ||||
7671 | TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); | ||||
7672 | TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); | ||||
7673 | assert(Op0->getType()->getScalarSizeInBits() == 1 && | ||||
7674 | Op1->getType()->getScalarSizeInBits() == 1); | ||||
7675 | |||||
7676 | SmallVector<const Value *, 2> Operands{Op0, Op1}; | ||||
7677 | return TTI.getArithmeticInstrCost( | ||||
7678 | match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy, | ||||
7679 | CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); | ||||
7680 | } | ||||
7681 | |||||
7682 | Type *CondTy = SI->getCondition()->getType(); | ||||
7683 | if (!ScalarCond) | ||||
7684 | CondTy = VectorType::get(CondTy, VF); | ||||
7685 | return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, | ||||
7686 | CmpInst::BAD_ICMP_PREDICATE, CostKind, I); | ||||
7687 | } | ||||
7688 | case Instruction::ICmp: | ||||
7689 | case Instruction::FCmp: { | ||||
7690 | Type *ValTy = I->getOperand(0)->getType(); | ||||
7691 | Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); | ||||
7692 | if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) | ||||
7693 | ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); | ||||
7694 | VectorTy = ToVectorTy(ValTy, VF); | ||||
7695 | return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, | ||||
7696 | CmpInst::BAD_ICMP_PREDICATE, CostKind, I); | ||||
7697 | } | ||||
7698 | case Instruction::Store: | ||||
7699 | case Instruction::Load: { | ||||
7700 | ElementCount Width = VF; | ||||
7701 | if (Width.isVector()) { | ||||
7702 | InstWidening Decision = getWideningDecision(I, Width); | ||||
7703 | assert(Decision != CM_Unknown && | ||||
7704 | "CM decision should be taken at this point"); | ||||
7705 | if (Decision == CM_Scalarize) | ||||
7706 | Width = ElementCount::getFixed(1); | ||||
7707 | } | ||||
7708 | VectorTy = ToVectorTy(getLoadStoreType(I), Width); | ||||
7709 | return getMemoryInstructionCost(I, VF); | ||||
7710 | } | ||||
7711 | case Instruction::BitCast: | ||||
7712 | if (I->getType()->isPointerTy()) | ||||
7713 | return 0; | ||||
7714 | LLVM_FALLTHROUGH; | ||||
7715 | case Instruction::ZExt: | ||||
7716 | case Instruction::SExt: | ||||
7717 | case Instruction::FPToUI: | ||||
7718 | case Instruction::FPToSI: | ||||
7719 | case Instruction::FPExt: | ||||
7720 | case Instruction::PtrToInt: | ||||
7721 | case Instruction::IntToPtr: | ||||
7722 | case Instruction::SIToFP: | ||||
7723 | case Instruction::UIToFP: | ||||
7724 | case Instruction::Trunc: | ||||
7725 | case Instruction::FPTrunc: { | ||||
7726 | // Computes the CastContextHint from a Load/Store instruction. | ||||
7727 | auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { | ||||
7728 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | ||||
7729 | "Expected a load or a store!"); | ||||
7730 | |||||
7731 | if (VF.isScalar() || !TheLoop->contains(I)) | ||||
7732 | return TTI::CastContextHint::Normal; | ||||
7733 | |||||
7734 | switch (getWideningDecision(I, VF)) { | ||||
7735 | case LoopVectorizationCostModel::CM_GatherScatter: | ||||
7736 | return TTI::CastContextHint::GatherScatter; | ||||
7737 | case LoopVectorizationCostModel::CM_Interleave: | ||||
7738 | return TTI::CastContextHint::Interleave; | ||||
7739 | case LoopVectorizationCostModel::CM_Scalarize: | ||||
7740 | case LoopVectorizationCostModel::CM_Widen: | ||||
7741 | return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked | ||||
7742 | : TTI::CastContextHint::Normal; | ||||
7743 | case LoopVectorizationCostModel::CM_Widen_Reverse: | ||||
7744 | return TTI::CastContextHint::Reversed; | ||||
7745 | case LoopVectorizationCostModel::CM_Unknown: | ||||
7746 | llvm_unreachable("Instr did not go through cost modelling?")::llvm::llvm_unreachable_internal("Instr did not go through cost modelling?" , "/build/llvm-toolchain-snapshot-13~++20210626100611+81b2f95971ed/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp" , 7746); | ||||
7747 | } | ||||
7748 | |||||
7749 | llvm_unreachable("Unhandled case!")::llvm::llvm_unreachable_internal("Unhandled case!", "/build/llvm-toolchain-snapshot-13~++20210626100611+81b2f95971ed/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp" , 7749); | ||||
7750 | }; | ||||
7751 | |||||
7752 | unsigned Opcode = I->getOpcode(); | ||||
7753 | TTI::CastContextHint CCH = TTI::CastContextHint::None; | ||||
7754 | // For Trunc, the context is the only user, which must be a StoreInst. | ||||
7755 | if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { | ||||
7756 | if (I->hasOneUse()) | ||||
7757 | if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) | ||||
7758 | CCH = ComputeCCH(Store); | ||||
7759 | } | ||||
7760 | // For Z/Sext, the context is the operand, which must be a LoadInst. | ||||
7761 | else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || | ||||
7762 | Opcode == Instruction::FPExt) { | ||||
7763 | if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) | ||||
7764 | CCH = ComputeCCH(Load); | ||||
7765 | } | ||||
7766 | |||||
7767 | // We optimize the truncation of induction variables having constant | ||||
7768 | // integer steps. The cost of these truncations is the same as the scalar | ||||
7769 | // operation. | ||||
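// (Illustrative example: truncating an i64 canonical induction to i32 with a
// constant step is costed below as a single scalar trunc rather than a
// widened vector trunc.)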
7770 | if (isOptimizableIVTruncate(I, VF)) { | ||||
7771 | auto *Trunc = cast<TruncInst>(I); | ||||
7772 | return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), | ||||
7773 | Trunc->getSrcTy(), CCH, CostKind, Trunc); | ||||
7774 | } | ||||
7775 | |||||
7776 | // Detect reduction patterns | ||||
7777 | InstructionCost RedCost; | ||||
7778 | if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) | ||||
7779 | .isValid()) | ||||
7780 | return RedCost; | ||||
7781 | |||||
7782 | Type *SrcScalarTy = I->getOperand(0)->getType(); | ||||
7783 | Type *SrcVecTy = | ||||
7784 | VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; | ||||
7785 | if (canTruncateToMinimalBitwidth(I, VF)) { | ||||
7786 | // This cast is going to be shrunk. This may remove the cast or it might | ||||
7787 | // turn it into a slightly different cast. For example, if MinBW == 16, | ||||
7788 | // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". | ||||
7789 | // | ||||
7790 | // Calculate the modified src and dest types. | ||||
7791 | Type *MinVecTy = VectorTy; | ||||
7792 | if (Opcode == Instruction::Trunc) { | ||||
7793 | SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); | ||||
7794 | VectorTy = | ||||
7795 | largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); | ||||
7796 | } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { | ||||
7797 | SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); | ||||
7798 | VectorTy = | ||||
7799 | smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); | ||||
7800 | } | ||||
7801 | } | ||||
7802 | |||||
7803 | return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); | ||||
7804 | } | ||||
7805 | case Instruction::Call: { | ||||
7806 | bool NeedToScalarize; | ||||
7807 | CallInst *CI = cast<CallInst>(I); | ||||
7808 | InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); | ||||
7809 | if (getVectorIntrinsicIDForCall(CI, TLI)) { | ||||
7810 | InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); | ||||
7811 | return std::min(CallCost, IntrinsicCost); | ||||
7812 | } | ||||
7813 | return CallCost; | ||||
7814 | } | ||||
7815 | case Instruction::ExtractValue: | ||||
7816 | return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); | ||||
7817 | default: | ||||
7818 | // This opcode is unknown. Assume that it is the same as 'mul'. | ||||
7819 | return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); | ||||
7820 | } // end of switch. | ||||
7821 | } | ||||
7822 | |||||
7823 | char LoopVectorize::ID = 0; | ||||
7824 | |||||
7825 | static const char lv_name[] = "Loop Vectorization"; | ||||
7826 | |||||
7827 | INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) | ||||
7828 | INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) | ||||
7829 | INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) | ||||
7830 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | ||||
7831 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) | ||||
7832 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) | ||||
7833 | INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) | ||||
7834 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | ||||
7835 | INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) | ||||
7836 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) | ||||
7837 | INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) | ||||
7838 | INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) | ||||
7839 | INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) | ||||
7840 | INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) | ||||
7841 | INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) | ||||
7842 | INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) | ||||
7843 | |||||
7844 | namespace llvm { | ||||
7845 | |||||
7846 | Pass *createLoopVectorizePass() { return new LoopVectorize(); } | ||||
7847 | |||||
7848 | Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, | ||||
7849 | bool VectorizeOnlyWhenForced) { | ||||
7850 | return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); | ||||
7851 | } | ||||
7852 | |||||
7853 | } // end namespace llvm | ||||
7854 | |||||
7855 | bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { | ||||
7856 | // Check if the pointer operand of a load or store instruction is | ||||
7857 | // consecutive. | ||||
7858 | if (auto *Ptr = getLoadStorePointerOperand(Inst)) | ||||
7859 | return Legal->isConsecutivePtr(Ptr); | ||||
7860 | return false; | ||||
7861 | } | ||||
7862 | |||||
7863 | void LoopVectorizationCostModel::collectValuesToIgnore() { | ||||
7864 | // Ignore ephemeral values. | ||||
7865 | CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); | ||||
7866 | |||||
7867 | // Ignore type-promoting instructions we identified during reduction | ||||
7868 | // detection. | ||||
7869 | for (auto &Reduction : Legal->getReductionVars()) { | ||||
7870 | RecurrenceDescriptor &RedDes = Reduction.second; | ||||
7871 | const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); | ||||
7872 | VecValuesToIgnore.insert(Casts.begin(), Casts.end()); | ||||
7873 | } | ||||
7874 | // Ignore type-casting instructions we identified during induction | ||||
7875 | // detection. | ||||
7876 | for (auto &Induction : Legal->getInductionVars()) { | ||||
7877 | InductionDescriptor &IndDes = Induction.second; | ||||
7878 | const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); | ||||
7879 | VecValuesToIgnore.insert(Casts.begin(), Casts.end()); | ||||
7880 | } | ||||
7881 | } | ||||
7882 | |||||
7883 | void LoopVectorizationCostModel::collectInLoopReductions() { | ||||
7884 | for (auto &Reduction : Legal->getReductionVars()) { | ||||
7885 | PHINode *Phi = Reduction.first; | ||||
7886 | RecurrenceDescriptor &RdxDesc = Reduction.second; | ||||
7887 | |||||
7888 | // We don't collect reductions that are type promoted (yet). | ||||
7889 | if (RdxDesc.getRecurrenceType() != Phi->getType()) | ||||
7890 | continue; | ||||
7891 | |||||
7892 | // If the target would prefer this reduction to happen "in-loop", then we | ||||
7893 | // want to record it as such. | ||||
7894 | unsigned Opcode = RdxDesc.getOpcode(); | ||||
7895 | if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && | ||||
7896 | !TTI.preferInLoopReduction(Opcode, Phi->getType(), | ||||
7897 | TargetTransformInfo::ReductionFlags())) | ||||
7898 | continue; | ||||
7899 | |||||
7900 | // Check that we can correctly put the reductions into the loop, by | ||||
7901 | // finding the chain of operations that leads from the phi to the loop | ||||
7902 | // exit value. | ||||
7903 | SmallVector<Instruction *, 4> ReductionOperations = | ||||
7904 | RdxDesc.getReductionOpChain(Phi, TheLoop); | ||||
7905 | bool InLoop = !ReductionOperations.empty(); | ||||
7906 | if (InLoop) { | ||||
7907 | InLoopReductionChains[Phi] = ReductionOperations; | ||||
7908 | // Add the elements to InLoopReductionImmediateChains for cost modelling. | ||||
7909 | Instruction *LastChain = Phi; | ||||
7910 | for (auto *I : ReductionOperations) { | ||||
7911 | InLoopReductionImmediateChains[I] = LastChain; | ||||
7912 | LastChain = I; | ||||
7913 | } | ||||
7914 | } | ||||
7915 | LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") | ||||
7916 | << " reduction for phi: " << *Phi << "\n"); | ||||
7917 | } | ||||
7918 | } | ||||
7919 | |||||
7920 | // TODO: we could return a pair of values that specify the max VF and | ||||
7921 | // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of | ||||
7922 | // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment | ||||
7923 | // doesn't have a cost model that can choose which plan to execute if | ||||
7924 | // more than one is generated. | ||||
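// (Illustrative example: with 256-bit fixed-width vector registers and a
// widest scalar type of 32 bits in the loop, the helper below returns
// 256 / 32 = 8.)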
7925 | static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, | ||||
7926 | LoopVectorizationCostModel &CM) { | ||||
7927 | unsigned WidestType; | ||||
7928 | std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); | ||||
7929 | return WidestVectorRegBits / WidestType; | ||||
7930 | } | ||||
7931 | |||||
7932 | VectorizationFactor | ||||
7933 | LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { | ||||
7934 | assert(!UserVF.isScalable() && "scalable vectors not yet supported"); | ||||
7935 | ElementCount VF = UserVF; | ||||
7936 | // Outer loop handling: They may require CFG and instruction level | ||||
7937 | // transformations before even evaluating whether vectorization is profitable. | ||||
7938 | // Since we cannot modify the incoming IR, we need to build VPlan upfront in | ||||
7939 | // the vectorization pipeline. | ||||
7940 | if (!OrigLoop->isInnermost()) { | ||||
7941 | // If the user doesn't provide a vectorization factor, determine a | ||||
7942 | // reasonable one. | ||||
7943 | if (UserVF.isZero()) { | ||||
7944 | VF = ElementCount::getFixed(determineVPlanVF( | ||||
7945 | TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) | ||||
7946 | .getFixedSize(), | ||||
7947 | CM)); | ||||
7948 | LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); | ||||
7949 | |||||
7950 | // Make sure we have a VF > 1 for stress testing. | ||||
7951 | if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { | ||||
7952 | LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " | ||||
7953 | << "overriding computed VF.\n"); | ||||
7954 | VF = ElementCount::getFixed(4); | ||||
7955 | } | ||||
7956 | } | ||||
7957 | assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); | ||||
7958 | assert(isPowerOf2_32(VF.getKnownMinValue()) && | ||||
7959 | "VF needs to be a power of two"); | ||||
7960 | LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") | ||||
7961 | << "VF " << VF << " to build VPlans.\n"); | ||||
7962 | buildVPlans(VF, VF); | ||||
7963 | |||||
7964 | // For VPlan build stress testing, we bail out after VPlan construction. | ||||
7965 | if (VPlanBuildStressTest) | ||||
7966 | return VectorizationFactor::Disabled(); | ||||
7967 | |||||
7968 | return {VF, 0 /*Cost*/}; | ||||
7969 | } | ||||
7970 | |||||
7971 | LLVM_DEBUG( | ||||
7972 | dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " | ||||
7973 | "VPlan-native path.\n"); | ||||
7974 | return VectorizationFactor::Disabled(); | ||||
7975 | } | ||||
7976 | |||||
7977 | Optional<VectorizationFactor> | ||||
7978 | LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { | ||||
7979 | assert(OrigLoop->isInnermost() && "Inner loop expected."); | ||||
7980 | FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); | ||||
7981 | if (!MaxFactors) // Cases that should not be vectorized or interleaved. | ||||
7982 | return None; | ||||
7983 | |||||
7984 | // Invalidate interleave groups if all blocks of loop will be predicated. | ||||
7985 | if (CM.blockNeedsPredication(OrigLoop->getHeader()) && | ||||
7986 | !useMaskedInterleavedAccesses(*TTI)) { | ||||
7987 | LLVM_DEBUG( | ||||
7988 | dbgs() | ||||
7989 | << "LV: Invalidate all interleaved groups due to fold-tail by masking " | ||||
7990 | "which requires masked-interleaved support.\n"); | ||||
7991 | if (CM.InterleaveInfo.invalidateGroups()) | ||||
7992 | // Invalidating interleave groups also requires invalidating all decisions | ||||
7993 | // based on them, which includes widening decisions and uniform and scalar | ||||
7994 | // values. | ||||
7995 | CM.invalidateCostModelingDecisions(); | ||||
7996 | } | ||||
7997 | |||||
7998 | ElementCount MaxUserVF = | ||||
7999 | UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; | ||||
8000 | bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); | ||||
8001 | if (!UserVF.isZero() && UserVFIsLegal) { | ||||
8002 | LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max") | ||||
8003 | << " VF " << UserVF << ".\n"); | ||||
8004 | assert(isPowerOf2_32(UserVF.getKnownMinValue()) && | ||||
8005 | "VF needs to be a power of two"); | ||||
8006 | // Collect the instructions (and their associated costs) that will be more | ||||
8007 | // profitable to scalarize. | ||||
8008 | CM.selectUserVectorizationFactor(UserVF); | ||||
8009 | CM.collectInLoopReductions(); | ||||
8010 | buildVPlansWithVPRecipes(UserVF, UserVF); | ||||
8011 | LLVM_DEBUG(printPlans(dbgs())); | ||||
8012 | return {{UserVF, 0}}; | ||||
8013 | } | ||||
8014 | |||||
8015 | // Populate the set of Vectorization Factor Candidates. | ||||
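// (Illustrative example: if MaxFactors.FixedVF were 8 and MaxFactors.ScalableVF
// were 4, the two loops below would collect {1, 2, 4, 8} and
// {vscale x 1, vscale x 2, vscale x 4} as candidates.)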
8016 | ElementCountSet VFCandidates; | ||||
8017 | for (auto VF = ElementCount::getFixed(1); | ||||
8018 | ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) | ||||
8019 | VFCandidates.insert(VF); | ||||
8020 | for (auto VF = ElementCount::getScalable(1); | ||||
8021 | ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) | ||||
8022 | VFCandidates.insert(VF); | ||||
8023 | |||||
8024 | for (const auto &VF : VFCandidates) { | ||||
8025 | // Collect Uniform and Scalar instructions after vectorization with VF. | ||||
8026 | CM.collectUniformsAndScalars(VF); | ||||
8027 | |||||
8028 | // Collect the instructions (and their associated costs) that will be more | ||||
8029 | // profitable to scalarize. | ||||
8030 | if (VF.isVector()) | ||||
8031 | CM.collectInstsToScalarize(VF); | ||||
8032 | } | ||||
8033 | |||||
8034 | CM.collectInLoopReductions(); | ||||
8035 | buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); | ||||
8036 | buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); | ||||
8037 | |||||
8038 | LLVM_DEBUG(printPlans(dbgs())); | ||||
8039 | if (!MaxFactors.hasVector()) | ||||
8040 | return VectorizationFactor::Disabled(); | ||||
8041 | |||||
8042 | // Select the optimal vectorization factor. | ||||
8043 | auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); | ||||
8044 | |||||
8045 | // Check if it is profitable to vectorize with runtime checks. | ||||
8046 | unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); | ||||
8047 | if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { | ||||
8048 | bool PragmaThresholdReached = | ||||
8049 | NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; | ||||
8050 | bool ThresholdReached = | ||||
8051 | NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; | ||||
8052 | if ((ThresholdReached && !Hints.allowReordering()) || | ||||
8053 | PragmaThresholdReached) { | ||||
8054 | ORE->emit([&]() { | ||||
8055 | return OptimizationRemarkAnalysisAliasing( | ||||
8056 | DEBUG_TYPE"loop-vectorize", "CantReorderMemOps", OrigLoop->getStartLoc(), | ||||
8057 | OrigLoop->getHeader()) | ||||
8058 | << "loop not vectorized: cannot prove it is safe to reorder " | ||||
8059 | "memory operations"; | ||||
8060 | }); | ||||
8061 | LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); | ||||
8062 | Hints.emitRemarkWithHints(); | ||||
8063 | return VectorizationFactor::Disabled(); | ||||
8064 | } | ||||
8065 | } | ||||
8066 | return SelectedVF; | ||||
8067 | } | ||||
8068 | |||||
8069 | void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { | ||||
8070 | LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF | ||||
8071 | << '\n'); | ||||
8072 | BestVF = VF; | ||||
8073 | BestUF = UF; | ||||
8074 | |||||
8075 | erase_if(VPlans, [VF](const VPlanPtr &Plan) { | ||||
8076 | return !Plan->hasVF(VF); | ||||
8077 | }); | ||||
8078 | assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); | ||||
8079 | } | ||||
8080 | |||||
8081 | void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, | ||||
8082 | DominatorTree *DT) { | ||||
8083 | // Perform the actual loop transformation. | ||||
8084 | |||||
8085 | // 1. Create a new empty loop. Unlink the old loop and connect the new one. | ||||
8086 | assert(BestVF.hasValue() && "Vectorization Factor is missing"); | ||||
8087 | assert(VPlans.size() == 1 && "Not a single VPlan to execute."); | ||||
8088 | |||||
8089 | VPTransformState State{ | ||||
8090 | *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; | ||||
8091 | State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); | ||||
8092 | State.TripCount = ILV.getOrCreateTripCount(nullptr); | ||||
8093 | State.CanonicalIV = ILV.Induction; | ||||
8094 | |||||
8095 | ILV.printDebugTracesAtStart(); | ||||
8096 | |||||
8097 | //===------------------------------------------------===// | ||||
8098 | // | ||||
8099 | // Notice: any optimization or new instruction that go | ||||
8100 | // into the code below should also be implemented in | ||||
8101 | // the cost-model. | ||||
8102 | // | ||||
8103 | //===------------------------------------------------===// | ||||
8104 | |||||
8105 | // 2. Copy and widen instructions from the old loop into the new loop. | ||||
8106 | VPlans.front()->execute(&State); | ||||
8107 | |||||
8108 | // 3. Fix the vectorized code: take care of header phi's, live-outs, | ||||
8109 | // predication, updating analyses. | ||||
8110 | ILV.fixVectorizedLoop(State); | ||||
8111 | |||||
8112 | ILV.printDebugTracesAtEnd(); | ||||
8113 | } | ||||
8114 | |||||
8115 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
8116 | void LoopVectorizationPlanner::printPlans(raw_ostream &O) { | ||||
8117 | for (const auto &Plan : VPlans) | ||||
8118 | if (PrintVPlansInDotFormat) | ||||
8119 | Plan->printDOT(O); | ||||
8120 | else | ||||
8121 | Plan->print(O); | ||||
8122 | } | ||||
8123 | #endif | ||||
8124 | |||||
8125 | void LoopVectorizationPlanner::collectTriviallyDeadInstructions( | ||||
8126 | SmallPtrSetImpl<Instruction *> &DeadInstructions) { | ||||
8127 | |||||
8128 | // We create new control-flow for the vectorized loop, so the original exit | ||||
8129 | // conditions will be dead after vectorization if they are only used by the | ||||
8130 | // terminator. | ||||
8131 | SmallVector<BasicBlock*> ExitingBlocks; | ||||
8132 | OrigLoop->getExitingBlocks(ExitingBlocks); | ||||
8133 | for (auto *BB : ExitingBlocks) { | ||||
8134 | auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); | ||||
8135 | if (!Cmp || !Cmp->hasOneUse()) | ||||
8136 | continue; | ||||
8137 | |||||
8138 | // TODO: we should introduce a getUniqueExitingBlocks on Loop | ||||
8139 | if (!DeadInstructions.insert(Cmp).second) | ||||
8140 | continue; | ||||
8141 | |||||
8142 | // An operand of the icmp is often a dead trunc, used by IndUpdate. | ||||
8143 | // TODO: can recurse through operands in general | ||||
8144 | for (Value *Op : Cmp->operands()) { | ||||
8145 | if (isa<TruncInst>(Op) && Op->hasOneUse()) | ||||
8146 | DeadInstructions.insert(cast<Instruction>(Op)); | ||||
8147 | } | ||||
8148 | } | ||||
8149 | |||||
8150 | // We create new "steps" for induction variable updates to which the original | ||||
8151 | // induction variables map. An original update instruction will be dead if | ||||
8152 | // all its users except the induction variable are dead. | ||||
8153 | auto *Latch = OrigLoop->getLoopLatch(); | ||||
8154 | for (auto &Induction : Legal->getInductionVars()) { | ||||
8155 | PHINode *Ind = Induction.first; | ||||
8156 | auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); | ||||
8157 | |||||
8158 | // If the tail is to be folded by masking, the primary induction variable, | ||||
8159 | // if it exists, isn't dead: it will be used for masking. Don't kill it. | ||||
8160 | if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) | ||||
8161 | continue; | ||||
8162 | |||||
8163 | if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { | ||||
8164 | return U == Ind || DeadInstructions.count(cast<Instruction>(U)); | ||||
8165 | })) | ||||
8166 | DeadInstructions.insert(IndUpdate); | ||||
8167 | |||||
8168 | // We record as "Dead" also the type-casting instructions we had identified | ||||
8169 | // during induction analysis. We don't need any handling for them in the | ||||
8170 | // vectorized loop because we have proven that, under a proper runtime | ||||
8171 | // test guarding the vectorized loop, the value of the phi, and the casted | ||||
8172 | // value of the phi, are the same. The last instruction in this casting chain | ||||
8173 | // will get its scalar/vector/widened def from the scalar/vector/widened def | ||||
8174 | // of the respective phi node. Any other casts in the induction def-use chain | ||||
8175 | // have no other uses outside the phi update chain, and will be ignored. | ||||
8176 | InductionDescriptor &IndDes = Induction.second; | ||||
8177 | const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); | ||||
8178 | DeadInstructions.insert(Casts.begin(), Casts.end()); | ||||
8179 | } | ||||
8180 | } | ||||
8181 | |||||
8182 | Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } | ||||
8183 | |||||
8184 | Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } | ||||
8185 | |||||
8186 | Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, | ||||
8187 | Instruction::BinaryOps BinOp) { | ||||
8188 | // When unrolling and the VF is 1, we only need to add a simple scalar. | ||||
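// (Illustrative example: for unrolled part StartIdx = 2 with integer step S,
// the value produced below is Val + 2 * S; the floating-point path forms the
// analogous Val BinOp (2.0 * Step).)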
8189 | Type *Ty = Val->getType(); | ||||
8190 | assert(!Ty->isVectorTy() && "Val must be a scalar"); | ||||
8191 | |||||
8192 | if (Ty->isFloatingPointTy()) { | ||||
8193 | Constant *C = ConstantFP::get(Ty, (double)StartIdx); | ||||
8194 | |||||
8195 | // Floating-point operations inherit FMF via the builder's flags. | ||||
8196 | Value *MulOp = Builder.CreateFMul(C, Step); | ||||
8197 | return Builder.CreateBinOp(BinOp, Val, MulOp); | ||||
8198 | } | ||||
8199 | Constant *C = ConstantInt::get(Ty, StartIdx); | ||||
8200 | return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); | ||||
8201 | } | ||||
8202 | |||||
8203 | static void AddRuntimeUnrollDisableMetaData(Loop *L) { | ||||
8204 | SmallVector<Metadata *, 4> MDs; | ||||
8205 | // Reserve first location for self reference to the LoopID metadata node. | ||||
8206 | MDs.push_back(nullptr); | ||||
8207 | bool IsUnrollMetadata = false; | ||||
8208 | MDNode *LoopID = L->getLoopID(); | ||||
8209 | if (LoopID) { | ||||
8210 | // First find existing loop unrolling disable metadata. | ||||
8211 | for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { | ||||
8212 | auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); | ||||
8213 | if (MD) { | ||||
8214 | const auto *S = dyn_cast<MDString>(MD->getOperand(0)); | ||||
8215 | IsUnrollMetadata = | ||||
8216 | S && S->getString().startswith("llvm.loop.unroll.disable"); | ||||
8217 | } | ||||
8218 | MDs.push_back(LoopID->getOperand(i)); | ||||
8219 | } | ||||
8220 | } | ||||
8221 | |||||
8222 | if (!IsUnrollMetadata) { | ||||
8223 | // Add runtime unroll disable metadata. | ||||
8224 | LLVMContext &Context = L->getHeader()->getContext(); | ||||
8225 | SmallVector<Metadata *, 1> DisableOperands; | ||||
8226 | DisableOperands.push_back( | ||||
8227 | MDString::get(Context, "llvm.loop.unroll.runtime.disable")); | ||||
8228 | MDNode *DisableNode = MDNode::get(Context, DisableOperands); | ||||
8229 | MDs.push_back(DisableNode); | ||||
8230 | MDNode *NewLoopID = MDNode::get(Context, MDs); | ||||
8231 | // Set operand 0 to refer to the loop id itself. | ||||
8232 | NewLoopID->replaceOperandWith(0, NewLoopID); | ||||
8233 | L->setLoopID(NewLoopID); | ||||
8234 | } | ||||
8235 | } | ||||
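// (Illustrative sketch of the metadata produced above, assuming the loop had
// no unroll metadata before: the latch branch ends up with !llvm.loop !0,
// where !0 = distinct !{!0, !1} and !1 = !{!"llvm.loop.unroll.runtime.disable"}.)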
8236 | |||||
8237 | //===--------------------------------------------------------------------===// | ||||
8238 | // EpilogueVectorizerMainLoop | ||||
8239 | //===--------------------------------------------------------------------===// | ||||
8240 | |||||
8241 | /// This function is partially responsible for generating the control flow | ||||
8242 | /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. | ||||
8243 | BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { | ||||
8244 | MDNode *OrigLoopID = OrigLoop->getLoopID(); | ||||
8245 | Loop *Lp = createVectorLoopSkeleton(""); | ||||
8246 | |||||
8247 | // Generate the code to check the minimum iteration count of the vector | ||||
8248 | // epilogue (see below). | ||||
8249 | EPI.EpilogueIterationCountCheck = | ||||
8250 | emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); | ||||
8251 | EPI.EpilogueIterationCountCheck->setName("iter.check"); | ||||
8252 | |||||
8253 | // Generate the code to check any assumptions that we've made for SCEV | ||||
8254 | // expressions. | ||||
8255 | EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); | ||||
8256 | |||||
8257 | // Generate the code that checks at runtime if arrays overlap. We put the | ||||
8258 | // checks into a separate block to make the more common case of few elements | ||||
8259 | // faster. | ||||
8260 | EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); | ||||
8261 | |||||
8262 | // Generate the iteration count check for the main loop, *after* the check | ||||
8263 | // for the epilogue loop, so that the path-length is shorter for the case | ||||
8264 | // that goes directly through the vector epilogue. The longer-path length for | ||||
8265 | // the main loop is compensated for by the gain from vectorizing the larger | ||||
8266 | // trip count. Note: the branch will get updated later on when we vectorize | ||||
8267 | // the epilogue. | ||||
8268 | EPI.MainLoopIterationCountCheck = | ||||
8269 | emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); | ||||
8270 | |||||
8271 | // Generate the induction variable. | ||||
8272 | OldInduction = Legal->getPrimaryInduction(); | ||||
8273 | Type *IdxTy = Legal->getWidestInductionType(); | ||||
8274 | Value *StartIdx = ConstantInt::get(IdxTy, 0); | ||||
8275 | Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); | ||||
8276 | Value *CountRoundDown = getOrCreateVectorTripCount(Lp); | ||||
8277 | EPI.VectorTripCount = CountRoundDown; | ||||
8278 | Induction = | ||||
8279 | createInductionVariable(Lp, StartIdx, CountRoundDown, Step, | ||||
8280 | getDebugLocFromInstOrOperands(OldInduction)); | ||||
8281 | |||||
8282 | // Skip induction resume value creation here because they will be created in | ||||
8283 | // the second pass. If we created them here, they wouldn't be used anyway, | ||||
8284 | // because the vplan in the second pass still contains the inductions from the | ||||
8285 | // original loop. | ||||
8286 | |||||
8287 | return completeLoopSkeleton(Lp, OrigLoopID); | ||||
8288 | } | ||||
8289 | |||||
8290 | void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { | ||||
8291 | LLVM_DEBUG({ | ||||
8292 | dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" | ||||
8293 | << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() | ||||
8294 | << ", Main Loop UF:" << EPI.MainLoopUF | ||||
8295 | << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() | ||||
8296 | << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; | ||||
8297 | }); | ||||
8298 | } | ||||
8299 | |||||
8300 | void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { | ||||
8301 | DEBUG_WITH_TYPE(VerboseDebug, { | ||||
8302 | dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; | ||||
8303 | }); | ||||
8304 | } | ||||
8305 | |||||
8306 | BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( | ||||
8307 | Loop *L, BasicBlock *Bypass, bool ForEpilogue) { | ||||
8308 | assert(L && "Expected valid Loop."); | ||||
8309 | assert(Bypass && "Expected valid bypass basic block."); | ||||
8310 | unsigned VFactor = | ||||
8311 | ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); | ||||
8312 | unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; | ||||
8313 | Value *Count = getOrCreateTripCount(L); | ||||
8314 | // Reuse existing vector loop preheader for TC checks. | ||||
8315 | // Note that new preheader block is generated for vector loop. | ||||
8316 | BasicBlock *const TCCheckBlock = LoopVectorPreHeader; | ||||
8317 | IRBuilder<> Builder(TCCheckBlock->getTerminator()); | ||||
8318 | |||||
8319 | // Generate code to check if the loop's trip count is less than VF * UF of the | ||||
8320 | // main vector loop. | ||||
8321 | auto P = | ||||
8322 | Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; | ||||
8323 | |||||
8324 | Value *CheckMinIters = Builder.CreateICmp( | ||||
8325 | P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), | ||||
8326 | "min.iters.check"); | ||||
8327 | |||||
8328 | if (!ForEpilogue) | ||||
8329 | TCCheckBlock->setName("vector.main.loop.iter.check"); | ||||
8330 | |||||
8331 | // Create new preheader for vector loop. | ||||
8332 | LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), | ||||
8333 | DT, LI, nullptr, "vector.ph"); | ||||
8334 | |||||
8335 | if (ForEpilogue) { | ||||
8336 | assert(DT->properlyDominates(DT->getNode(TCCheckBlock), | ||||
8337 | DT->getNode(Bypass)->getIDom()) && | ||||
8338 | "TC check is expected to dominate Bypass"); | ||||
8339 | |||||
8340 | // Update dominator for Bypass & LoopExit. | ||||
8341 | DT->changeImmediateDominator(Bypass, TCCheckBlock); | ||||
8342 | DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); | ||||
8343 | |||||
8344 | LoopBypassBlocks.push_back(TCCheckBlock); | ||||
8345 | |||||
8346 | // Save the trip count so we don't have to regenerate it in the | ||||
8347 | // vec.epilog.iter.check. This is safe to do because the trip count | ||||
8348 | // generated here dominates the vector epilog iter check. | ||||
8349 | EPI.TripCount = Count; | ||||
8350 | } | ||||
8351 | |||||
8352 | ReplaceInstWithInst( | ||||
8353 | TCCheckBlock->getTerminator(), | ||||
8354 | BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); | ||||
8355 | |||||
8356 | return TCCheckBlock; | ||||
8357 | } | ||||
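The guard created above compares the original trip count with the iterations consumed per vector iteration (VF * UF), using ICMP_ULE when a scalar epilogue is required so that at least one scalar iteration is always left over. A minimal scalar sketch of the branch condition, assuming VF = 8, UF = 2 and no required scalar epilogue (the helper name and constants are illustrative, not from this file):

  bool SkipMainVectorLoop(uint64_t TripCount) {
    // "min.iters.check": too few iterations, so branch to Bypass (the scalar
    // or epilogue path) instead of entering the main vector loop.
    return TripCount < 8 * 2;
  }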
8358 | |||||
8359 | //===--------------------------------------------------------------------===// | ||||
8360 | // EpilogueVectorizerEpilogueLoop | ||||
8361 | //===--------------------------------------------------------------------===// | ||||
8362 | |||||
8363 | /// This function is partially responsible for generating the control flow | ||||
8364 | /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. | ||||
8365 | BasicBlock * | ||||
8366 | EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { | ||||
8367 | MDNode *OrigLoopID = OrigLoop->getLoopID(); | ||||
8368 | Loop *Lp = createVectorLoopSkeleton("vec.epilog."); | ||||
8369 | |||||
8370 | // Now, compare the remaining count and, if there aren't enough iterations to | ||||
8371 | // execute the vectorized epilogue, skip to the scalar part. | ||||
8372 | BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; | ||||
8373 | VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); | ||||
8374 | LoopVectorPreHeader = | ||||
8375 | SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, | ||||
8376 | LI, nullptr, "vec.epilog.ph"); | ||||
8377 | emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, | ||||
8378 | VecEpilogueIterationCountCheck); | ||||
8379 | |||||
8380 | // Adjust the control flow taking the state info from the main loop | ||||
8381 | // vectorization into account. | ||||
8382 | assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && | ||||
8383 | "expected this to be saved from the previous pass."); | ||||
8384 | EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( | ||||
8385 | VecEpilogueIterationCountCheck, LoopVectorPreHeader); | ||||
8386 | |||||
8387 | DT->changeImmediateDominator(LoopVectorPreHeader, | ||||
8388 | EPI.MainLoopIterationCountCheck); | ||||
8389 | |||||
8390 | EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( | ||||
8391 | VecEpilogueIterationCountCheck, LoopScalarPreHeader); | ||||
8392 | |||||
8393 | if (EPI.SCEVSafetyCheck) | ||||
8394 | EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( | ||||
8395 | VecEpilogueIterationCountCheck, LoopScalarPreHeader); | ||||
8396 | if (EPI.MemSafetyCheck) | ||||
8397 | EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( | ||||
8398 | VecEpilogueIterationCountCheck, LoopScalarPreHeader); | ||||
8399 | |||||
8400 | DT->changeImmediateDominator( | ||||
8401 | VecEpilogueIterationCountCheck, | ||||
8402 | VecEpilogueIterationCountCheck->getSinglePredecessor()); | ||||
8403 | |||||
8404 | DT->changeImmediateDominator(LoopScalarPreHeader, | ||||
8405 | EPI.EpilogueIterationCountCheck); | ||||
8406 | DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); | ||||
8407 | |||||
8408 | // Keep track of bypass blocks, as they feed start values to the induction | ||||
8409 | // phis in the scalar loop preheader. | ||||
8410 | if (EPI.SCEVSafetyCheck) | ||||
8411 | LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); | ||||
8412 | if (EPI.MemSafetyCheck) | ||||
8413 | LoopBypassBlocks.push_back(EPI.MemSafetyCheck); | ||||
8414 | LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); | ||||
8415 | |||||
8416 | // Generate a resume induction for the vector epilogue and put it in the | ||||
8417 | // vector epilogue preheader | ||||
8418 | Type *IdxTy = Legal->getWidestInductionType(); | ||||
8419 | PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", | ||||
8420 | LoopVectorPreHeader->getFirstNonPHI()); | ||||
8421 | EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); | ||||
8422 | EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), | ||||
8423 | EPI.MainLoopIterationCountCheck); | ||||
8424 | |||||
8425 | // Generate the induction variable. | ||||
8426 | OldInduction = Legal->getPrimaryInduction(); | ||||
8427 | Value *CountRoundDown = getOrCreateVectorTripCount(Lp); | ||||
8428 | Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); | ||||
8429 | Value *StartIdx = EPResumeVal; | ||||
8430 | Induction = | ||||
8431 | createInductionVariable(Lp, StartIdx, CountRoundDown, Step, | ||||
8432 | getDebugLocFromInstOrOperands(OldInduction)); | ||||
8433 | |||||
8434 | // Generate induction resume values. These variables save the new starting | ||||
8435 | // indexes for the scalar loop. They are used to test if there are any tail | ||||
8436 | // iterations left once the vector loop has completed. | ||||
8437 | // Note that when the vectorized epilogue is skipped due to the iteration | ||||
8438 | // count check, the resume value for the induction variable comes from | ||||
8439 | // the trip count of the main vector loop, hence passing the AdditionalBypass | ||||
8440 | // argument. | ||||
8441 | createInductionResumeValues(Lp, CountRoundDown, | ||||
8442 | {VecEpilogueIterationCountCheck, | ||||
8443 | EPI.VectorTripCount} /* AdditionalBypass */); | ||||
8444 | |||||
8445 | AddRuntimeUnrollDisableMetaData(Lp); | ||||
8446 | return completeLoopSkeleton(Lp, OrigLoopID); | ||||
8447 | } | ||||
8448 | |||||
8449 | BasicBlock * | ||||
8450 | EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( | ||||
8451 | Loop *L, BasicBlock *Bypass, BasicBlock *Insert) { | ||||
8452 | |||||
8453 | assert(EPI.TripCount && | ||||
8454 | "Expected trip count to have been saved in the first pass."); | ||||
8455 | assert( | ||||
8456 | (!isa<Instruction>(EPI.TripCount) || | ||||
8457 | DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) && | ||||
8458 | "saved trip count does not dominate insertion point."); | ||||
8459 | Value *TC = EPI.TripCount; | ||||
8460 | IRBuilder<> Builder(Insert->getTerminator()); | ||||
8461 | Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); | ||||
8462 | |||||
8463 | // Generate code to check if the loop's trip count is less than VF * UF of the | ||||
8464 | // vector epilogue loop. | ||||
8465 | auto P = | ||||
8466 | Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; | ||||
8467 | |||||
8468 | Value *CheckMinIters = Builder.CreateICmp( | ||||
8469 | P, Count, | ||||
8470 | ConstantInt::get(Count->getType(), | ||||
8471 | EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF), | ||||
8472 | "min.epilog.iters.check"); | ||||
8473 | |||||
8474 | ReplaceInstWithInst( | ||||
8475 | Insert->getTerminator(), | ||||
8476 | BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); | ||||
8477 | |||||
8478 | LoopBypassBlocks.push_back(Insert); | ||||
8479 | return Insert; | ||||
8480 | } | ||||
8481 | |||||
8482 | void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { | ||||
8483 | LLVM_DEBUG({ | ||||
8484 | dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" | ||||
8485 | << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() | ||||
8486 | << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; | ||||
8487 | }); | ||||
8488 | } | ||||
8489 | |||||
8490 | void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { | ||||
8491 | DEBUG_WITH_TYPE(VerboseDebug, { | ||||
8492 | dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; | ||||
8493 | }); | ||||
8494 | } | ||||
8495 | |||||
8496 | bool LoopVectorizationPlanner::getDecisionAndClampRange( | ||||
8497 | const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { | ||||
8498 | assert(!Range.isEmpty() && "Trying to test an empty VF range."); | ||||
8499 | bool PredicateAtRangeStart = Predicate(Range.Start); | ||||
8500 | |||||
8501 | for (ElementCount TmpVF = Range.Start * 2; | ||||
8502 | ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) | ||||
8503 | if (Predicate(TmpVF) != PredicateAtRangeStart) { | ||||
8504 | Range.End = TmpVF; | ||||
8505 | break; | ||||
8506 | } | ||||
8507 | |||||
8508 | return PredicateAtRangeStart; | ||||
8509 | } | ||||
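A usage sketch (the VF values are hypothetical, not taken from this file): the predicate is evaluated once at Range.Start, and Range.End is clamped to the first larger VF whose answer differs, so every VF left in the range shares a single decision.

  VFRange R = {ElementCount::getFixed(4), ElementCount::getFixed(32)};
  auto Widen = [](ElementCount VF) { return VF.getKnownMinValue() <= 8; };
  bool Decision = LoopVectorizationPlanner::getDecisionAndClampRange(Widen, R);
  // Decision is the answer for VF = 4 (true here); R.End is clamped to 16,
  // the first of 8, 16 whose answer differs from VF = 4's.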
8510 | |||||
8511 | /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, | ||||
8512 | /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range | ||||
8513 | /// of VF's starting at a given VF and extending it as much as possible. Each | ||||
8514 | /// vectorization decision can potentially shorten this sub-range during | ||||
8515 | /// buildVPlan(). | ||||
8516 | void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, | ||||
8517 | ElementCount MaxVF) { | ||||
8518 | auto MaxVFPlusOne = MaxVF.getWithIncrement(1); | ||||
8519 | for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { | ||||
8520 | VFRange SubRange = {VF, MaxVFPlusOne}; | ||||
8521 | VPlans.push_back(buildVPlan(SubRange)); | ||||
8522 | VF = SubRange.End; | ||||
8523 | } | ||||
8524 | } | ||||
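A worked example with hypothetical decisions: for MinVF = 1 and MaxVF = 16 the first iteration builds a plan for SubRange = [1, 17); if some decision made during buildVPlan differs between VF = 1 and the vector VFs, that range is clamped to [1, 2), and a second iteration builds one more plan covering [2, 17), i.e. VFs 2, 4, 8 and 16, leaving two entries in VPlans.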
8525 | |||||
8526 | VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, | ||||
8527 | VPlanPtr &Plan) { | ||||
8528 | assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); | ||||
8529 | |||||
8530 | // Look for cached value. | ||||
8531 | std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); | ||||
8532 | EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); | ||||
8533 | if (ECEntryIt != EdgeMaskCache.end()) | ||||
8534 | return ECEntryIt->second; | ||||
8535 | |||||
8536 | VPValue *SrcMask = createBlockInMask(Src, Plan); | ||||
8537 | |||||
8538 | // The terminator has to be a branch inst! | ||||
8539 | BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); | ||||
8540 | assert(BI && "Unexpected terminator found"); | ||||
8541 | |||||
8542 | if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) | ||||
8543 | return EdgeMaskCache[Edge] = SrcMask; | ||||
8544 | |||||
8545 | // If source is an exiting block, we know the exit edge is dynamically dead | ||||
8546 | // in the vector loop, and thus we don't need to restrict the mask. Avoid | ||||
8547 | // adding uses of an otherwise potentially dead instruction. | ||||
8548 | if (OrigLoop->isLoopExiting(Src)) | ||||
8549 | return EdgeMaskCache[Edge] = SrcMask; | ||||
8550 | |||||
8551 | VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); | ||||
8552 | assert(EdgeMask && "No Edge Mask found for condition"); | ||||
8553 | |||||
8554 | if (BI->getSuccessor(0) != Dst) | ||||
8555 | EdgeMask = Builder.createNot(EdgeMask); | ||||
8556 | |||||
8557 | if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. | ||||
8558 | // The condition is 'SrcMask && EdgeMask', which is equivalent to | ||||
8559 | // 'select i1 SrcMask, i1 EdgeMask, i1 false'. | ||||
8560 | // The select version does not introduce new UB if SrcMask is false and | ||||
8561 | // EdgeMask is poison. Using 'and' here introduces undefined behavior. | ||||
8562 | VPValue *False = Plan->getOrAddVPValue( | ||||
8563 | ConstantInt::getFalse(BI->getCondition()->getType())); | ||||
8564 | EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); | ||||
8565 | } | ||||
8566 | |||||
8567 | return EdgeMaskCache[Edge] = EdgeMask; | ||||
8568 | } | ||||
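A note on the select form chosen above: with SrcMask == false and EdgeMask == poison, 'and SrcMask, EdgeMask' is still poison because a poison operand propagates through 'and', while 'select SrcMask, EdgeMask, false' evaluates to false because the poison arm is never selected. The combined edge mask therefore stays well defined on paths the block mask has already disabled.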
8569 | |||||
8570 | VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { | ||||
8571 | assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); | ||||
8572 | |||||
8573 | // Look for cached value. | ||||
8574 | BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); | ||||
8575 | if (BCEntryIt != BlockMaskCache.end()) | ||||
8576 | return BCEntryIt->second; | ||||
8577 | |||||
8578 | // All-one mask is modelled as no-mask following the convention for masked | ||||
8579 | // load/store/gather/scatter. Initialize BlockMask to no-mask. | ||||
8580 | VPValue *BlockMask = nullptr; | ||||
8581 | |||||
8582 | if (OrigLoop->getHeader() == BB) { | ||||
8583 | if (!CM.blockNeedsPredication(BB)) | ||||
8584 | return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. | ||||
8585 | |||||
8586 | // Create the block in mask as the first non-phi instruction in the block. | ||||
8587 | VPBuilder::InsertPointGuard Guard(Builder); | ||||
8588 | auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); | ||||
8589 | Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); | ||||
8590 | |||||
8591 | // Introduce the early-exit compare IV <= BTC to form header block mask. | ||||
8592 | // This is used instead of IV < TC because TC may wrap, unlike BTC. | ||||
8593 | // Start by constructing the desired canonical IV. | ||||
8594 | VPValue *IV = nullptr; | ||||
8595 | if (Legal->getPrimaryInduction()) | ||||
8596 | IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); | ||||
8597 | else { | ||||
8598 | auto IVRecipe = new VPWidenCanonicalIVRecipe(); | ||||
8599 | Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); | ||||
8600 | IV = IVRecipe->getVPSingleValue(); | ||||
8601 | } | ||||
8602 | VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); | ||||
8603 | bool TailFolded = !CM.isScalarEpilogueAllowed(); | ||||
8604 | |||||
8605 | if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { | ||||
8606 | // While ActiveLaneMask is a binary op that consumes the loop tripcount | ||||
8607 | // as a second argument, we only pass the IV here and extract the | ||||
8608 | // tripcount from the transform state where codegen of the VP instructions | ||||
8609 | // happens. | ||||
8610 | BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); | ||||
8611 | } else { | ||||
8612 | BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); | ||||
8613 | } | ||||
8614 | return BlockMaskCache[BB] = BlockMask; | ||||
8615 | } | ||||
8616 | |||||
8617 | // This is the block mask. We OR all incoming edges. | ||||
8618 | for (auto *Predecessor : predecessors(BB)) { | ||||
8619 | VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); | ||||
8620 | if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. | ||||
8621 | return BlockMaskCache[BB] = EdgeMask; | ||||
8622 | |||||
8623 | if (!BlockMask) { // BlockMask has its initialized nullptr value. | ||||
8624 | BlockMask = EdgeMask; | ||||
8625 | continue; | ||||
8626 | } | ||||
8627 | |||||
8628 | BlockMask = Builder.createOr(BlockMask, EdgeMask); | ||||
8629 | } | ||||
8630 | |||||
8631 | return BlockMaskCache[BB] = BlockMask; | ||||
8632 | } | ||||
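Why the header mask uses IV <= BTC rather than IV < TC (a worked example with a hypothetical narrow type): for an i8 induction and a loop executing 256 iterations, the trip count 256 wraps to 0, so 'IV < TC' would be false for every lane, whereas the backedge-taken count 255 is still representable and 'IV <= BTC' holds exactly for the intended iterations 0..255.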
8633 | |||||
8634 | VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, | ||||
8635 | ArrayRef<VPValue *> Operands, | ||||
8636 | VFRange &Range, | ||||
8637 | VPlanPtr &Plan) { | ||||
8638 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | ||||
8639 | "Must be called with either a load or store"); | ||||
8640 | |||||
8641 | auto willWiden = [&](ElementCount VF) -> bool { | ||||
8642 | if (VF.isScalar()) | ||||
8643 | return false; | ||||
8644 | LoopVectorizationCostModel::InstWidening Decision = | ||||
8645 | CM.getWideningDecision(I, VF); | ||||
8646 | assert(Decision != LoopVectorizationCostModel::CM_Unknown && | ||||
8647 | "CM decision should be taken at this point."); | ||||
8648 | if (Decision == LoopVectorizationCostModel::CM_Interleave) | ||||
8649 | return true; | ||||
8650 | if (CM.isScalarAfterVectorization(I, VF) || | ||||
8651 | CM.isProfitableToScalarize(I, VF)) | ||||
8652 | return false; | ||||
8653 | return Decision != LoopVectorizationCostModel::CM_Scalarize; | ||||
8654 | }; | ||||
8655 | |||||
8656 | if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) | ||||
8657 | return nullptr; | ||||
8658 | |||||
8659 | VPValue *Mask = nullptr; | ||||
8660 | if (Legal->isMaskRequired(I)) | ||||
8661 | Mask = createBlockInMask(I->getParent(), Plan); | ||||
8662 | |||||
8663 | if (LoadInst *Load = dyn_cast<LoadInst>(I)) | ||||
8664 | return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask); | ||||
8665 | |||||
8666 | StoreInst *Store = cast<StoreInst>(I); | ||||
8667 | return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], | ||||
8668 | Mask); | ||||
8669 | } | ||||
8670 | |||||
8671 | VPWidenIntOrFpInductionRecipe * | ||||
8672 | VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, | ||||
8673 | ArrayRef<VPValue *> Operands) const { | ||||
8674 | // Check if this is an integer or fp induction. If so, build the recipe that | ||||
8675 | // produces its scalar and vector values. | ||||
8676 | InductionDescriptor II = Legal->getInductionVars().lookup(Phi); | ||||
8677 | if (II.getKind() == InductionDescriptor::IK_IntInduction || | ||||
8678 | II.getKind() == InductionDescriptor::IK_FpInduction) { | ||||
8679 | assert(II.getStartValue() == | ||||
8680 | Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); | ||||
8681 | const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); | ||||
8682 | return new VPWidenIntOrFpInductionRecipe( | ||||
8683 | Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); | ||||
8684 | } | ||||
8685 | |||||
8686 | return nullptr; | ||||
8687 | } | ||||
8688 | |||||
8689 | VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( | ||||
8690 | TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, | ||||
8691 | VPlan &Plan) const { | ||||
8692 | // Optimize the special case where the source is a constant integer | ||||
8693 | // induction variable. Notice that we can only optimize the 'trunc' case | ||||
8694 | // because (a) FP conversions lose precision, (b) sext/zext may wrap, and | ||||
8695 | // (c) other casts depend on pointer size. | ||||
8696 | |||||
8697 | // Determine whether \p K is a truncation based on an induction variable that | ||||
8698 | // can be optimized. | ||||
8699 | auto isOptimizableIVTruncate = | ||||
8700 | [&](Instruction *K) -> std::function<bool(ElementCount)> { | ||||
8701 | return [=](ElementCount VF) -> bool { | ||||
8702 | return CM.isOptimizableIVTruncate(K, VF); | ||||
8703 | }; | ||||
8704 | }; | ||||
8705 | |||||
8706 | if (LoopVectorizationPlanner::getDecisionAndClampRange( | ||||
8707 | isOptimizableIVTruncate(I), Range)) { | ||||
8708 | |||||
8709 | InductionDescriptor II = | ||||
8710 | Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); | ||||
8711 | VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); | ||||
8712 | return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), | ||||
8713 | Start, nullptr, I); | ||||
8714 | } | ||||
8715 | return nullptr; | ||||
8716 | } | ||||
8717 | |||||
8718 | VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, | ||||
8719 | ArrayRef<VPValue *> Operands, | ||||
8720 | VPlanPtr &Plan) { | ||||
8721 | // If all incoming values are equal, the incoming VPValue can be used directly | ||||
8722 | // instead of creating a new VPBlendRecipe. | ||||
8723 | VPValue *FirstIncoming = Operands[0]; | ||||
8724 | if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { | ||||
8725 | return FirstIncoming == Inc; | ||||
8726 | })) { | ||||
8727 | return Operands[0]; | ||||
8728 | } | ||||
8729 | |||||
8730 | // We know that all PHIs in non-header blocks are converted into selects, so | ||||
8731 | // we don't have to worry about the insertion order and we can just use the | ||||
8732 | // builder. At this point we generate the predication tree. There may be | ||||
8733 | // duplications since this is a simple recursive scan, but future | ||||
8734 | // optimizations will clean it up. | ||||
8735 | SmallVector<VPValue *, 2> OperandsWithMask; | ||||
8736 | unsigned NumIncoming = Phi->getNumIncomingValues(); | ||||
8737 | |||||
8738 | for (unsigned In = 0; In < NumIncoming; In++) { | ||||
8739 | VPValue *EdgeMask = | ||||
8740 | createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); | ||||
8741 | assert((EdgeMask || NumIncoming == 1) && | ||||
8742 | "Multiple predecessors with one having a full mask"); | ||||
8743 | OperandsWithMask.push_back(Operands[In]); | ||||
8744 | if (EdgeMask) | ||||
8745 | OperandsWithMask.push_back(EdgeMask); | ||||
8746 | } | ||||
8747 | return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); | ||||
8748 | } | ||||
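An illustration with hypothetical names: a phi in a non-header block merging V0 from its first predecessor and V1 from its second becomes a VPBlendRecipe whose operands are laid out as (V0, M0, V1, M1), each Mi being the mask of the corresponding incoming edge; code generation later lowers the blend into a chain of selects over those masks.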
8749 | |||||
8750 | VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, | ||||
8751 | ArrayRef<VPValue *> Operands, | ||||
8752 | VFRange &Range) const { | ||||
8753 | |||||
8754 | bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( | ||||
8755 | [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, | ||||
8756 | Range); | ||||
8757 | |||||
8758 | if (IsPredicated) | ||||
8759 | return nullptr; | ||||
8760 | |||||
8761 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | ||||
8762 | if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || | ||||
8763 | ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || | ||||
8764 | ID == Intrinsic::pseudoprobe || | ||||
8765 | ID == Intrinsic::experimental_noalias_scope_decl)) | ||||
8766 | return nullptr; | ||||
8767 | |||||
8768 | auto willWiden = [&](ElementCount VF) -> bool { | ||||
8769 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | ||||
8770 | // The following case may be scalarized depending on the VF. | ||||
8771 | // The flag shows whether we use Intrinsic or a usual Call for vectorized | ||||
8772 | // version of the instruction. | ||||
8773 | // Is it beneficial to perform intrinsic call compared to lib call? | ||||
8774 | bool NeedToScalarize = false; | ||||
8775 | InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); | ||||
8776 | InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; | ||||
8777 | bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; | ||||
8778 | assert((IntrinsicCost.isValid() || CallCost.isValid()) && | ||||
8779 | "Either the intrinsic cost or vector call cost must be valid"); | ||||
8780 | return UseVectorIntrinsic || !NeedToScalarize; | ||||
8781 | }; | ||||
8782 | |||||
8783 | if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) | ||||
8784 | return nullptr; | ||||
8785 | |||||
8786 | ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands()); | ||||
8787 | return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); | ||||
8788 | } | ||||
8789 | |||||
8790 | bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { | ||||
8791 | assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && | ||||
8792 | !isa<StoreInst>(I) && "Instruction should have been handled earlier"); | ||||
8793 | // Instruction should be widened, unless it is scalar after vectorization, | ||||
8794 | // scalarization is profitable or it is predicated. | ||||
8795 | auto WillScalarize = [this, I](ElementCount VF) -> bool { | ||||
8796 | return CM.isScalarAfterVectorization(I, VF) || | ||||
8797 | CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); | ||||
8798 | }; | ||||
8799 | return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, | ||||
8800 | Range); | ||||
8801 | } | ||||
8802 | |||||
8803 | VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, | ||||
8804 | ArrayRef<VPValue *> Operands) const { | ||||
8805 | auto IsVectorizableOpcode = [](unsigned Opcode) { | ||||
8806 | switch (Opcode) { | ||||
8807 | case Instruction::Add: | ||||
8808 | case Instruction::And: | ||||
8809 | case Instruction::AShr: | ||||
8810 | case Instruction::BitCast: | ||||
8811 | case Instruction::FAdd: | ||||
8812 | case Instruction::FCmp: | ||||
8813 | case Instruction::FDiv: | ||||
8814 | case Instruction::FMul: | ||||
8815 | case Instruction::FNeg: | ||||
8816 | case Instruction::FPExt: | ||||
8817 | case Instruction::FPToSI: | ||||
8818 | case Instruction::FPToUI: | ||||
8819 | case Instruction::FPTrunc: | ||||
8820 | case Instruction::FRem: | ||||
8821 | case Instruction::FSub: | ||||
8822 | case Instruction::ICmp: | ||||
8823 | case Instruction::IntToPtr: | ||||
8824 | case Instruction::LShr: | ||||
8825 | case Instruction::Mul: | ||||
8826 | case Instruction::Or: | ||||
8827 | case Instruction::PtrToInt: | ||||
8828 | case Instruction::SDiv: | ||||
8829 | case Instruction::Select: | ||||
8830 | case Instruction::SExt: | ||||
8831 | case Instruction::Shl: | ||||
8832 | case Instruction::SIToFP: | ||||
8833 | case Instruction::SRem: | ||||
8834 | case Instruction::Sub: | ||||
8835 | case Instruction::Trunc: | ||||
8836 | case Instruction::UDiv: | ||||
8837 | case Instruction::UIToFP: | ||||
8838 | case Instruction::URem: | ||||
8839 | case Instruction::Xor: | ||||
8840 | case Instruction::ZExt: | ||||
8841 | return true; | ||||
8842 | } | ||||
8843 | return false; | ||||
8844 | }; | ||||
8845 | |||||
8846 | if (!IsVectorizableOpcode(I->getOpcode())) | ||||
8847 | return nullptr; | ||||
8848 | |||||
8849 | // Success: widen this instruction. | ||||
8850 | return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); | ||||
8851 | } | ||||
8852 | |||||
8853 | void VPRecipeBuilder::fixHeaderPhis() { | ||||
8854 | BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); | ||||
8855 | for (VPWidenPHIRecipe *R : PhisToFix) { | ||||
8856 | auto *PN = cast<PHINode>(R->getUnderlyingValue()); | ||||
8857 | VPRecipeBase *IncR = | ||||
8858 | getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); | ||||
8859 | R->addOperand(IncR->getVPSingleValue()); | ||||
8860 | } | ||||
8861 | } | ||||
8862 | |||||
8863 | VPBasicBlock *VPRecipeBuilder::handleReplication( | ||||
8864 | Instruction *I, VFRange &Range, VPBasicBlock *VPBB, | ||||
8865 | VPlanPtr &Plan) { | ||||
8866 | bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( | ||||
8867 | [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, | ||||
8868 | Range); | ||||
8869 | |||||
8870 | bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( | ||||
8871 | [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); | ||||
8872 | |||||
8873 | auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), | ||||
8874 | IsUniform, IsPredicated); | ||||
8875 | setRecipe(I, Recipe); | ||||
8876 | Plan->addVPValue(I, Recipe); | ||||
8877 | |||||
8878 | // Find if I uses a predicated instruction. If so, it will use its scalar | ||||
8879 | // value. Avoid hoisting the insert-element which packs the scalar value into | ||||
8880 | // a vector value, as that happens iff all users use the vector value. | ||||
8881 | for (VPValue *Op : Recipe->operands()) { | ||||
8882 | auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); | ||||
8883 | if (!PredR) | ||||
8884 | continue; | ||||
8885 | auto *RepR = | ||||
8886 | cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); | ||||
8887 | assert(RepR->isPredicated() && | ||||
8888 | "expected Replicate recipe to be predicated"); | ||||
8889 | RepR->setAlsoPack(false); | ||||
8890 | } | ||||
8891 | |||||
8892 | // Finalize the recipe for Instr, first if it is not predicated. | ||||
8893 | if (!IsPredicated) { | ||||
8894 | LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); | ||||
8895 | VPBB->appendRecipe(Recipe); | ||||
8896 | return VPBB; | ||||
8897 | } | ||||
8898 | LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); | ||||
8899 | assert(VPBB->getSuccessors().empty() && | ||||
8900 | "VPBB has successors when handling predicated replication."); | ||||
8901 | // Record predicated instructions for above packing optimizations. | ||||
8902 | VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); | ||||
8903 | VPBlockUtils::insertBlockAfter(Region, VPBB); | ||||
8904 | auto *RegSucc = new VPBasicBlock(); | ||||
8905 | VPBlockUtils::insertBlockAfter(RegSucc, Region); | ||||
8906 | return RegSucc; | ||||
8907 | } | ||||
8908 | |||||
8909 | VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, | ||||
8910 | VPRecipeBase *PredRecipe, | ||||
8911 | VPlanPtr &Plan) { | ||||
8912 | // Instructions marked for predication are replicated and placed under an | ||||
8913 | // if-then construct to prevent side-effects. | ||||
8914 | |||||
8915 | // Generate recipes to compute the block mask for this region. | ||||
8916 | VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); | ||||
8917 | |||||
8918 | // Build the triangular if-then region. | ||||
8919 | std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); | ||||
8920 | assert(Instr->getParent() && "Predicated instruction not in any basic block"); | ||||
8921 | auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); | ||||
8922 | auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); | ||||
8923 | auto *PHIRecipe = Instr->getType()->isVoidTy() | ||||
8924 | ? nullptr | ||||
8925 | : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); | ||||
8926 | if (PHIRecipe) { | ||||
8927 | Plan->removeVPValueFor(Instr); | ||||
8928 | Plan->addVPValue(Instr, PHIRecipe); | ||||
8929 | } | ||||
8930 | auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); | ||||
8931 | auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); | ||||
8932 | VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); | ||||
8933 | |||||
8934 | // Note: first set Entry as region entry and then connect successors starting | ||||
8935 | // from it in order, to propagate the "parent" of each VPBasicBlock. | ||||
8936 | VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); | ||||
8937 | VPBlockUtils::connectBlocks(Pred, Exit); | ||||
8938 | |||||
8939 | return Region; | ||||
8940 | } | ||||
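Schematically, the region produced above is a triangle (block names follow the 'pred.<opcode>' RegionName prefix used in the code):

  pred.<opcode>.entry      holds the VPBranchOnMaskRecipe on the block-in mask
      |        \
      |    pred.<opcode>.if        holds the replicated, predicated instruction
      |        /
  pred.<opcode>.continue   holds the VPPredInstPHIRecipe merging the result
                           (omitted when the instruction returns void)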
8941 | |||||
8942 | VPRecipeOrVPValueTy | ||||
8943 | VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, | ||||
8944 | ArrayRef<VPValue *> Operands, | ||||
8945 | VFRange &Range, VPlanPtr &Plan) { | ||||
8946 | // First, check for specific widening recipes that deal with calls, memory | ||||
8947 | // operations, inductions and Phi nodes. | ||||
8948 | if (auto *CI = dyn_cast<CallInst>(Instr)) | ||||
8949 | return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); | ||||
8950 | |||||
8951 | if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) | ||||
8952 | return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); | ||||
8953 | |||||
8954 | VPRecipeBase *Recipe; | ||||
8955 | if (auto Phi = dyn_cast<PHINode>(Instr)) { | ||||
8956 | if (Phi->getParent() != OrigLoop->getHeader()) | ||||
8957 | return tryToBlend(Phi, Operands, Plan); | ||||
8958 | if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) | ||||
8959 | return toVPRecipeResult(Recipe); | ||||
8960 | |||||
8961 | if (Legal->isReductionVariable(Phi)) { | ||||
8962 | RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; | ||||
8963 | assert(RdxDesc.getRecurrenceStartValue() == | ||||
8964 | Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); | ||||
8965 | VPValue *StartV = Operands[0]; | ||||
8966 | |||||
8967 | auto *PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV); | ||||
8968 | PhisToFix.push_back(PhiRecipe); | ||||
8969 | // Record the incoming value from the backedge, so we can add the incoming | ||||
8970 | // value from the backedge after all recipes have been created. | ||||
8971 | recordRecipeOf(cast<Instruction>( | ||||
8972 | Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); | ||||
8973 | return toVPRecipeResult(PhiRecipe); | ||||
8974 | } | ||||
8975 | |||||
8976 | return toVPRecipeResult(new VPWidenPHIRecipe(Phi)); | ||||
8977 | } | ||||
8978 | |||||
8979 | if (isa<TruncInst>(Instr) && | ||||
8980 | (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, | ||||
8981 | Range, *Plan))) | ||||
8982 | return toVPRecipeResult(Recipe); | ||||
8983 | |||||
8984 | if (!shouldWiden(Instr, Range)) | ||||
8985 | return nullptr; | ||||
8986 | |||||
8987 | if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) | ||||
8988 | return toVPRecipeResult(new VPWidenGEPRecipe( | ||||
8989 | GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); | ||||
8990 | |||||
8991 | if (auto *SI = dyn_cast<SelectInst>(Instr)) { | ||||
8992 | bool InvariantCond = | ||||
8993 | PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); | ||||
8994 | return toVPRecipeResult(new VPWidenSelectRecipe( | ||||
8995 | *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); | ||||
8996 | } | ||||
8997 | |||||
8998 | return toVPRecipeResult(tryToWiden(Instr, Operands)); | ||||
8999 | } | ||||
9000 | |||||
9001 | void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, | ||||
9002 | ElementCount MaxVF) { | ||||
9003 | assert(OrigLoop->isInnermost() && "Inner loop expected."); | ||||
9004 | |||||
9005 | // Collect instructions from the original loop that will become trivially dead | ||||
9006 | // in the vectorized loop. We don't need to vectorize these instructions. For | ||||
9007 | // example, original induction update instructions can become dead because we | ||||
9008 | // separately emit induction "steps" when generating code for the new loop. | ||||
9009 | // Similarly, we create a new latch condition when setting up the structure | ||||
9010 | // of the new loop, so the old one can become dead. | ||||
9011 | SmallPtrSet<Instruction *, 4> DeadInstructions; | ||||
9012 | collectTriviallyDeadInstructions(DeadInstructions); | ||||
9013 | |||||
9014 | // Add assume instructions we need to drop to DeadInstructions, to prevent | ||||
9015 | // them from being added to the VPlan. | ||||
9016 | // TODO: We only need to drop assumes in blocks that get flattened. If the | ||||
9017 | // control flow is preserved, we should keep them. | ||||
9018 | auto &ConditionalAssumes = Legal->getConditionalAssumes(); | ||||
9019 | DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); | ||||
9020 | |||||
9021 | MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); | ||||
9022 | // Dead instructions do not need sinking. Remove them from SinkAfter. | ||||
9023 | for (Instruction *I : DeadInstructions) | ||||
9024 | SinkAfter.erase(I); | ||||
9025 | |||||
9026 | auto MaxVFPlusOne = MaxVF.getWithIncrement(1); | ||||
9027 | for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { | ||||
9028 | VFRange SubRange = {VF, MaxVFPlusOne}; | ||||
9029 | VPlans.push_back( | ||||
9030 | buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); | ||||
9031 | VF = SubRange.End; | ||||
9032 | } | ||||
9033 | } | ||||
9034 | |||||
9035 | VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( | ||||
9036 | VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, | ||||
9037 | const MapVector<Instruction *, Instruction *> &SinkAfter) { | ||||
9038 | |||||
9039 | SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; | ||||
9040 | |||||
9041 | VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); | ||||
9042 | |||||
9043 | // --------------------------------------------------------------------------- | ||||
9044 | // Pre-construction: record ingredients whose recipes we'll need to further | ||||
9045 | // process after constructing the initial VPlan. | ||||
9046 | // --------------------------------------------------------------------------- | ||||
9047 | |||||
9048 | // Mark instructions we'll need to sink later and their targets as | ||||
9049 | // ingredients whose recipe we'll need to record. | ||||
9050 | for (auto &Entry : SinkAfter) { | ||||
9051 | RecipeBuilder.recordRecipeOf(Entry.first); | ||||
9052 | RecipeBuilder.recordRecipeOf(Entry.second); | ||||
9053 | } | ||||
9054 | for (auto &Reduction : CM.getInLoopReductionChains()) { | ||||
9055 | PHINode *Phi = Reduction.first; | ||||
9056 | RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind(); | ||||
9057 | const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; | ||||
9058 | |||||
9059 | RecipeBuilder.recordRecipeOf(Phi); | ||||
9060 | for (auto &R : ReductionOperations) { | ||||
9061 | RecipeBuilder.recordRecipeOf(R); | ||||
9062 | // For min/max reductions, where we have a pair of icmp/select, we also | ||||
9063 | // need to record the ICmp recipe, so it can be removed later. | ||||
9064 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) | ||||
9065 | RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); | ||||
9066 | } | ||||
9067 | } | ||||
9068 | |||||
9069 | // For each interleave group which is relevant for this (possibly trimmed) | ||||
9070 | // Range, add it to the set of groups to be later applied to the VPlan and add | ||||
9071 | // placeholders for its members' Recipes which we'll be replacing with a | ||||
9072 | // single VPInterleaveRecipe. | ||||
9073 | for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { | ||||
9074 | auto applyIG = [IG, this](ElementCount VF) -> bool { | ||||
9075 | return (VF.isVector() && // Query is illegal for VF == 1 | ||||
9076 | CM.getWideningDecision(IG->getInsertPos(), VF) == | ||||
9077 | LoopVectorizationCostModel::CM_Interleave); | ||||
9078 | }; | ||||
9079 | if (!getDecisionAndClampRange(applyIG, Range)) | ||||
9080 | continue; | ||||
9081 | InterleaveGroups.insert(IG); | ||||
9082 | for (unsigned i = 0; i < IG->getFactor(); i++) | ||||
9083 | if (Instruction *Member = IG->getMember(i)) | ||||
9084 | RecipeBuilder.recordRecipeOf(Member); | ||||
9085 | }; | ||||
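// For illustration: getDecisionAndClampRange evaluates applyIG at Range.Start
// and clamps Range.End so the same decision holds for the whole sub-range.
// E.g., if interleaving was chosen for VF = 4 but not for VF = 8, a range
// starting at 4 is clamped to end before 8, and the group is recorded only
// for the plan covering VF = 4.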
9086 | |||||
9087 | // --------------------------------------------------------------------------- | ||||
9088 | // Build initial VPlan: Scan the body of the loop in a topological order to | ||||
9089 | // visit each basic block after having visited its predecessor basic blocks. | ||||
9090 | // --------------------------------------------------------------------------- | ||||
9091 | |||||
9092 | // Create a dummy pre-entry VPBasicBlock to start building the VPlan. | ||||
9093 | auto Plan = std::make_unique<VPlan>(); | ||||
9094 | VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); | ||||
9095 | Plan->setEntry(VPBB); | ||||
9096 | |||||
9097 | // Scan the body of the loop in a topological order to visit each basic block | ||||
9098 | // after having visited its predecessor basic blocks. | ||||
9099 | LoopBlocksDFS DFS(OrigLoop); | ||||
9100 | DFS.perform(LI); | ||||
9101 | |||||
9102 | for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { | ||||
9103 | // Relevant instructions from basic block BB will be grouped into VPRecipe | ||||
9104 | // ingredients and fill a new VPBasicBlock. | ||||
9105 | unsigned VPBBsForBB = 0; | ||||
9106 | auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); | ||||
9107 | VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); | ||||
9108 | VPBB = FirstVPBBForBB; | ||||
9109 | Builder.setInsertPoint(VPBB); | ||||
9110 | |||||
9111 | // Introduce each ingredient into VPlan. | ||||
9112 | // TODO: Model and preserve debug intrinsics in VPlan. | ||||
9113 | for (Instruction &I : BB->instructionsWithoutDebug()) { | ||||
9114 | Instruction *Instr = &I; | ||||
9115 | |||||
9116 | // First filter out irrelevant instructions, to ensure no recipes are | ||||
9117 | // built for them. | ||||
9118 | if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) | ||||
9119 | continue; | ||||
9120 | |||||
9121 | SmallVector<VPValue *, 4> Operands; | ||||
9122 | auto *Phi = dyn_cast<PHINode>(Instr); | ||||
9123 | if (Phi && Phi->getParent() == OrigLoop->getHeader()) { | ||||
9124 | Operands.push_back(Plan->getOrAddVPValue( | ||||
9125 | Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); | ||||
9126 | } else { | ||||
9127 | auto OpRange = Plan->mapToVPValues(Instr->operands()); | ||||
9128 | Operands = {OpRange.begin(), OpRange.end()}; | ||||
9129 | } | ||||
9130 | if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( | ||||
9131 | Instr, Operands, Range, Plan)) { | ||||
9132 | // If Instr can be simplified to an existing VPValue, use it. | ||||
9133 | if (RecipeOrValue.is<VPValue *>()) { | ||||
9134 | auto *VPV = RecipeOrValue.get<VPValue *>(); | ||||
9135 | Plan->addVPValue(Instr, VPV); | ||||
9136 | // If the re-used value is a recipe, register the recipe for the | ||||
9137 | // instruction, in case the recipe for Instr needs to be recorded. | ||||
9138 | if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) | ||||
9139 | RecipeBuilder.setRecipe(Instr, R); | ||||
9140 | continue; | ||||
9141 | } | ||||
9142 | // Otherwise, add the new recipe. | ||||
9143 | VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); | ||||
9144 | for (auto *Def : Recipe->definedValues()) { | ||||
9145 | auto *UV = Def->getUnderlyingValue(); | ||||
9146 | Plan->addVPValue(UV, Def); | ||||
9147 | } | ||||
9148 | |||||
9149 | RecipeBuilder.setRecipe(Instr, Recipe); | ||||
9150 | VPBB->appendRecipe(Recipe); | ||||
9151 | continue; | ||||
9152 | } | ||||
9153 | |||||
9154 | // Otherwise, if all widening options failed, Instruction is to be | ||||
9155 | // replicated. This may create a successor for VPBB. | ||||
9156 | VPBasicBlock *NextVPBB = | ||||
9157 | RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); | ||||
9158 | if (NextVPBB != VPBB) { | ||||
9159 | VPBB = NextVPBB; | ||||
9160 | VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) | ||||
9161 | : ""); | ||||
9162 | } | ||||
9163 | } | ||||
9164 | } | ||||
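// For illustration: each original basic block contributes one or more
// VPBasicBlocks. Instructions handed to handleReplication that require
// predication are typically wrapped in a replicate region, which is why VPBB
// can advance to a new successor block here; e.g., a conditional store in the
// loop body usually ends up as a single-recipe region guarded by a
// branch-on-mask.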
9165 | |||||
9166 | RecipeBuilder.fixHeaderPhis(); | ||||
9167 | |||||
9168 | // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks | ||||
9169 | // may also be empty, such as the last one, VPBB, reflecting original | ||||
9170 | // basic blocks with no recipes. | ||||
9171 | VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); | ||||
9172 | assert(PreEntry->empty() && "Expecting empty pre-entry block."); | ||||
9173 | VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); | ||||
9174 | VPBlockUtils::disconnectBlocks(PreEntry, Entry); | ||||
9175 | delete PreEntry; | ||||
9176 | |||||
9177 | // --------------------------------------------------------------------------- | ||||
9178 | // Transform initial VPlan: Apply previously taken decisions, in order, to | ||||
9179 | // bring the VPlan to its final state. | ||||
9180 | // --------------------------------------------------------------------------- | ||||
9181 | |||||
9182 | // Apply Sink-After legal constraints. | ||||
9183 | for (auto &Entry : SinkAfter) { | ||||
9184 | VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); | ||||
9185 | VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); | ||||
9186 | |||||
9187 | auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { | ||||
9188 | auto *Region = | ||||
9189 | dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); | ||||
9190 | if (Region && Region->isReplicator()) { | ||||
9191 | assert(Region->getNumSuccessors() == 1 && | ||||
9192 | Region->getNumPredecessors() == 1 && "Expected SESE region!"); | ||||
9193 | assert(R->getParent()->size() == 1 && | ||||
9194 | "A recipe in an original replicator region must be the only " | ||||
9195 | "recipe in its block"); | ||||
9196 | return Region; | ||||
9197 | } | ||||
9198 | return nullptr; | ||||
9199 | }; | ||||
9200 | auto *TargetRegion = GetReplicateRegion(Target); | ||||
9201 | auto *SinkRegion = GetReplicateRegion(Sink); | ||||
9202 | if (!SinkRegion) { | ||||
9203 | // If the sink source is not a replicate region, sink the recipe directly. | ||||
9204 | if (TargetRegion) { | ||||
9205 | // The target is in a replication region, make sure to move Sink to | ||||
9206 | // the block after it, not into the replication region itself. | ||||
9207 | VPBasicBlock *NextBlock = | ||||
9208 | cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); | ||||
9209 | Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); | ||||
9210 | } else | ||||
9211 | Sink->moveAfter(Target); | ||||
9212 | continue; | ||||
9213 | } | ||||
9214 | |||||
9215 | // The sink source is in a replicate region. Unhook the region from the CFG. | ||||
9216 | auto *SinkPred = SinkRegion->getSinglePredecessor(); | ||||
9217 | auto *SinkSucc = SinkRegion->getSingleSuccessor(); | ||||
9218 | VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); | ||||
9219 | VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); | ||||
9220 | VPBlockUtils::connectBlocks(SinkPred, SinkSucc); | ||||
9221 | |||||
9222 | if (TargetRegion) { | ||||
9223 | // The target recipe is also in a replicate region, move the sink region | ||||
9224 | // after the target region. | ||||
9225 | auto *TargetSucc = TargetRegion->getSingleSuccessor(); | ||||
9226 | VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); | ||||
9227 | VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); | ||||
9228 | VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); | ||||
9229 | } else { | ||||
9230 | // The sink source is in a replicate region; we need to move the whole | ||||
9231 | // replicate region, which should only contain a single recipe in the main | ||||
9232 | // block. | ||||
9233 | auto *SplitBlock = | ||||
9234 | Target->getParent()->splitAt(std::next(Target->getIterator())); | ||||
9235 | |||||
9236 | auto *SplitPred = SplitBlock->getSinglePredecessor(); | ||||
9237 | |||||
9238 | VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); | ||||
9239 | VPBlockUtils::connectBlocks(SplitPred, SinkRegion); | ||||
9240 | VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); | ||||
9241 | if (VPBB == SplitPred) | ||||
9242 | VPBB = SplitBlock; | ||||
9243 | } | ||||
9244 | } | ||||
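// For illustration: SinkAfter entries typically stem from first-order
// recurrences, where a user of the recurrence phi has to be emitted after the
// recipe producing the previous iteration's value. If that user was
// predicated, its whole single-recipe replicate region is unhooked above and
// re-linked after the target (or after a split of the target's block).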
9245 | |||||
9246 | // Interleave memory: for each Interleave Group we marked earlier as relevant | ||||
9247 | // for this VPlan, replace the Recipes widening its memory instructions with a | ||||
9248 | // single VPInterleaveRecipe at its insertion point. | ||||
9249 | for (auto IG : InterleaveGroups) { | ||||
9250 | auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( | ||||
9251 | RecipeBuilder.getRecipe(IG->getInsertPos())); | ||||
9252 | SmallVector<VPValue *, 4> StoredValues; | ||||
9253 | for (unsigned i = 0; i < IG->getFactor(); ++i) | ||||
9254 | if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) | ||||
9255 | StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); | ||||
9256 | |||||
9257 | auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, | ||||
9258 | Recipe->getMask()); | ||||
9259 | VPIG->insertBefore(Recipe); | ||||
9260 | unsigned J = 0; | ||||
9261 | for (unsigned i = 0; i < IG->getFactor(); ++i) | ||||
9262 | if (Instruction *Member = IG->getMember(i)) { | ||||
9263 | if (!Member->getType()->isVoidTy()) { | ||||
9264 | VPValue *OriginalV = Plan->getVPValue(Member); | ||||
9265 | Plan->removeVPValueFor(Member); | ||||
9266 | Plan->addVPValue(Member, VPIG->getVPValue(J)); | ||||
9267 | OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); | ||||
9268 | J++; | ||||
9269 | } | ||||
9270 | RecipeBuilder.getRecipe(Member)->eraseFromParent(); | ||||
9271 | } | ||||
9272 | } | ||||
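// For illustration: for a factor-2 load group {A[2*i], A[2*i+1]}, the two
// widened-load recipes are erased and a single VPInterleaveRecipe is inserted
// at the group's insert position; its J-th defined value takes over all uses
// of the J-th member's old VPValue via the replaceAllUsesWith call above.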
9273 | |||||
9274 | // Adjust the recipes for any in-loop reductions. | ||||
9275 | if (Range.Start.isVector()) | ||||
9276 | adjustRecipesForInLoopReductions(Plan, RecipeBuilder); | ||||
9277 | |||||
9278 | // Finally, if tail is folded by masking, introduce selects between the phi | ||||
9279 | // and the live-out instruction of each reduction, at the end of the latch. | ||||
9280 | if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { | ||||
9281 | Builder.setInsertPoint(VPBB); | ||||
9282 | auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); | ||||
9283 | for (auto &Reduction : Legal->getReductionVars()) { | ||||
9284 | if (CM.isInLoopReduction(Reduction.first)) | ||||
9285 | continue; | ||||
9286 | VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); | ||||
9287 | VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); | ||||
9288 | Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); | ||||
9289 | } | ||||
9290 | } | ||||
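// For illustration: for an out-of-loop sum reduction with a folded tail, this
// emits roughly select(header-mask, sum.next, sum.phi) at the latch, so lanes
// disabled by the mask keep the phi's value and do not perturb the final
// horizontal reduction.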
9291 | |||||
9292 | VPlanTransforms::sinkScalarOperands(*Plan); | ||||
9293 | |||||
9294 | std::string PlanName; | ||||
9295 | raw_string_ostream RSO(PlanName); | ||||
9296 | ElementCount VF = Range.Start; | ||||
9297 | Plan->addVF(VF); | ||||
9298 | RSO << "Initial VPlan for VF={" << VF; | ||||
9299 | for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { | ||||
9300 | Plan->addVF(VF); | ||||
9301 | RSO << "," << VF; | ||||
9302 | } | ||||
9303 | RSO << "},UF>=1"; | ||||
9304 | RSO.flush(); | ||||
9305 | Plan->setName(PlanName); | ||||
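// For illustration: with Range.Start = 4 and Range.End = 16, the loop above
// records VF = 4 and VF = 8, and the plan is named
// "Initial VPlan for VF={4,8},UF>=1".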
9306 | |||||
9307 | return Plan; | ||||
9308 | } | ||||
9309 | |||||
9310 | VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { | ||||
9311 | // Outer loop handling: They may require CFG and instruction level | ||||
9312 | // transformations before even evaluating whether vectorization is profitable. | ||||
9313 | // Since we cannot modify the incoming IR, we need to build VPlan upfront in | ||||
9314 | // the vectorization pipeline. | ||||
9315 | assert(!OrigLoop->isInnermost()); | ||||
9316 | assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); | ||||
9317 | |||||
9318 | // Create new empty VPlan | ||||
9319 | auto Plan = std::make_unique<VPlan>(); | ||||
9320 | |||||
9321 | // Build hierarchical CFG | ||||
9322 | VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); | ||||
9323 | HCFGBuilder.buildHierarchicalCFG(); | ||||
9324 | |||||
9325 | for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); | ||||
9326 | VF *= 2) | ||||
9327 | Plan->addVF(VF); | ||||
9328 | |||||
9329 | if (EnableVPlanPredication) { | ||||
9330 | VPlanPredicator VPP(*Plan); | ||||
9331 | VPP.predicate(); | ||||
9332 | |||||
9333 | // Avoid running transformation to recipes until masked code generation in | ||||
9334 | // VPlan-native path is in place. | ||||
9335 | return Plan; | ||||
9336 | } | ||||
9337 | |||||
9338 | SmallPtrSet<Instruction *, 1> DeadInstructions; | ||||
9339 | VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan, | ||||
9340 | Legal->getInductionVars(), | ||||
9341 | DeadInstructions, *PSE.getSE()); | ||||
9342 | return Plan; | ||||
9343 | } | ||||
9344 | |||||
9345 | // Adjust the recipes for any in-loop reductions. The chain of instructions | ||||
9346 | // leading from the loop exit instr to the phi needs to be converted to | ||||
9347 | // reductions, with one operand being vector and the other being the scalar | ||||
9348 | // reduction chain. | ||||
9349 | void LoopVectorizationPlanner::adjustRecipesForInLoopReductions( | ||||
9350 | VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) { | ||||
9351 | for (auto &Reduction : CM.getInLoopReductionChains()) { | ||||
9352 | PHINode *Phi = Reduction.first; | ||||
9353 | RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; | ||||
9354 | const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; | ||||
9355 | |||||
9356 | // ReductionOperations are ordered top-down from the phi's use to the | ||||
9357 | // LoopExitValue. We keep track of the previous item (the Chain) to tell | ||||
9358 | // which of the two operands will remain scalar and which will be reduced. | ||||
9359 | // For minmax the chain will be the select instructions. | ||||
9360 | Instruction *Chain = Phi; | ||||
9361 | for (Instruction *R : ReductionOperations) { | ||||
9362 | VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); | ||||
9363 | RecurKind Kind = RdxDesc.getRecurrenceKind(); | ||||
9364 | |||||
9365 | VPValue *ChainOp = Plan->getVPValue(Chain); | ||||
9366 | unsigned FirstOpId; | ||||
9367 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { | ||||
9368 | assert(isa<VPWidenSelectRecipe>(WidenRecipe) && | ||||
9369 | "Expected to replace a VPWidenSelectSC"); | ||||
9370 | FirstOpId = 1; | ||||
9371 | } else { | ||||
9372 | assert(isa<VPWidenRecipe>(WidenRecipe) && | ||||
9373 | "Expected to replace a VPWidenSC"); | ||||
9374 | FirstOpId = 0; | ||||
9375 | } | ||||
9376 | unsigned VecOpId = | ||||
9377 | R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; | ||||
9378 | VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); | ||||
9379 | |||||
9380 | auto *CondOp = CM.foldTailByMasking() | ||||
9381 | ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) | ||||
9382 | : nullptr; | ||||
9383 | VPReductionRecipe *RedRecipe = new VPReductionRecipe( | ||||
9384 | &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); | ||||
9385 | WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); | ||||
9386 | Plan->removeVPValueFor(R); | ||||
9387 | Plan->addVPValue(R, RedRecipe); | ||||
9388 | WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); | ||||
9389 | WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); | ||||
9390 | WidenRecipe->eraseFromParent(); | ||||
9391 | |||||
9392 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { | ||||
9393 | VPRecipeBase *CompareRecipe = | ||||
9394 | RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); | ||||
9395 | assert(isa<VPWidenRecipe>(CompareRecipe) && | ||||
9396 | "Expected to replace a VPWidenSC"); | ||||
9397 | assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && | ||||
9398 | "Expected no remaining users"); | ||||
9399 | CompareRecipe->eraseFromParent(); | ||||
9400 | } | ||||
9401 | Chain = R; | ||||
9402 | } | ||||
9403 | } | ||||
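// For illustration: for an in-loop integer add reduction s += a[i], the widened
// add recipe is replaced by a VPReductionRecipe whose vector operand is the
// widened a[i] and whose chain operand is the scalar value of s produced by the
// previous link of the chain. For a min/max reduction the select is replaced
// instead, and its now-unused widened compare recipe is erased.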
9404 | } | ||||
9405 | |||||
9406 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
9407 | void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, | ||||
9408 | VPSlotTracker &SlotTracker) const { | ||||
9409 | O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; | ||||
9410 | IG->getInsertPos()->printAsOperand(O, false); | ||||
9411 | O << ", "; | ||||
9412 | getAddr()->printAsOperand(O, SlotTracker); | ||||
9413 | VPValue *Mask = getMask(); | ||||
9414 | if (Mask) { | ||||
9415 | O << ", "; | ||||
9416 | Mask->printAsOperand(O, SlotTracker); | ||||
9417 | } | ||||
9418 | for (unsigned i = 0; i < IG->getFactor(); ++i) | ||||
9419 | if (Instruction *I = IG->getMember(i)) | ||||
9420 | O << "\n" << Indent << " " << VPlanIngredient(I) << " " << i; | ||||
9421 | } | ||||
9422 | #endif | ||||
9423 | |||||
9424 | void VPWidenCallRecipe::execute(VPTransformState &State) { | ||||
9425 | State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, | ||||
9426 | *this, State); | ||||
9427 | } | ||||
9428 | |||||
9429 | void VPWidenSelectRecipe::execute(VPTransformState &State) { | ||||
9430 | State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), | ||||
9431 | this, *this, InvariantCond, State); | ||||
9432 | } | ||||
9433 | |||||
9434 | void VPWidenRecipe::execute(VPTransformState &State) { | ||||
9435 | State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); | ||||
9436 | } | ||||
9437 | |||||
9438 | void VPWidenGEPRecipe::execute(VPTransformState &State) { | ||||
9439 | State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, | ||||
9440 | *this, State.UF, State.VF, IsPtrLoopInvariant, | ||||
9441 | IsIndexLoopInvariant, State); | ||||
9442 | } | ||||
9443 | |||||
9444 | void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { | ||||
9445 | assert(!State.Instance && "Int or FP induction being replicated."); | ||||
9446 | State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), | ||||
9447 | getTruncInst(), getVPValue(0), | ||||
9448 | getCastValue(), State); | ||||
9449 | } | ||||
9450 | |||||
9451 | void VPWidenPHIRecipe::execute(VPTransformState &State) { | ||||
9452 | State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc, | ||||
| |||||
9453 | this, State); | ||||
9454 | } | ||||
9455 | |||||
9456 | void VPBlendRecipe::execute(VPTransformState &State) { | ||||
9457 | State.ILV->setDebugLocFromInst(State.Builder, Phi); | ||||
9458 | // We know that all PHIs in non-header blocks are converted into | ||||
9459 | // selects, so we don't have to worry about the insertion order and we | ||||
9460 | // can just use the builder. | ||||
9461 | // At this point we generate the predication tree. There may be | ||||
9462 | // duplications since this is a simple recursive scan, but future | ||||
9463 | // optimizations will clean it up. | ||||
9464 | |||||
9465 | unsigned NumIncoming = getNumIncomingValues(); | ||||
9466 | |||||
9467 | // Generate a sequence of selects of the form: | ||||
9468 | // SELECT(Mask3, In3, | ||||
9469 | // SELECT(Mask2, In2, | ||||
9470 | // SELECT(Mask1, In1, | ||||
9471 | // In0))) | ||||
9472 | // Note that Mask0 is never used: lanes for which no path reaches this phi, | ||||
9473 | // and which are therefore essentially undef, are taken from In0. | ||||
9474 | InnerLoopVectorizer::VectorParts Entry(State.UF); | ||||
9475 | for (unsigned In = 0; In < NumIncoming; ++In) { | ||||
9476 | for (unsigned Part = 0; Part < State.UF; ++Part) { | ||||
9477 | // We might have single edge PHIs (blocks) - use an identity | ||||
9478 | // 'select' for the first PHI operand. | ||||
9479 | Value *In0 = State.get(getIncomingValue(In), Part); | ||||
9480 | if (In == 0) | ||||
9481 | Entry[Part] = In0; // Initialize with the first incoming value. | ||||
9482 | else { | ||||
9483 | // Select between the current value and the previous incoming edge | ||||
9484 | // based on the incoming mask. | ||||
9485 | Value *Cond = State.get(getMask(In), Part); | ||||
9486 | Entry[Part] = | ||||
9487 | State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); | ||||
9488 | } | ||||
9489 | } | ||||
9490 | } | ||||
9491 | for (unsigned Part = 0; Part < State.UF; ++Part) | ||||
9492 | State.set(this, Entry[Part], Part); | ||||
9493 | } | ||||
9494 | |||||
9495 | void VPInterleaveRecipe::execute(VPTransformState &State) { | ||||
9496 | assert(!State.Instance && "Interleave group being replicated."); | ||||
9497 | State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), | ||||
9498 | getStoredValues(), getMask()); | ||||
9499 | } | ||||
9500 | |||||
9501 | void VPReductionRecipe::execute(VPTransformState &State) { | ||||
9502 | assert(!State.Instance && "Reduction being replicated."); | ||||
9503 | Value *PrevInChain = State.get(getChainOp(), 0); | ||||
9504 | for (unsigned Part = 0; Part < State.UF; ++Part) { | ||||
9505 | RecurKind Kind = RdxDesc->getRecurrenceKind(); | ||||
9506 | bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); | ||||
9507 | Value *NewVecOp = State.get(getVecOp(), Part); | ||||
9508 | if (VPValue *Cond = getCondOp()) { | ||||
9509 | Value *NewCond = State.get(Cond, Part); | ||||
9510 | VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); | ||||
9511 | Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( | ||||
9512 | Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); | ||||
9513 | Constant *IdenVec = | ||||
9514 | ConstantVector::getSplat(VecTy->getElementCount(), Iden); | ||||
9515 | Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); | ||||
9516 | NewVecOp = Select; | ||||
9517 | } | ||||
9518 | Value *NewRed; | ||||
9519 | Value *NextInChain; | ||||
9520 | if (IsOrdered) { | ||||
9521 | NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, | ||||
9522 | PrevInChain); | ||||
9523 | PrevInChain = NewRed; | ||||
9524 | } else { | ||||
9525 | PrevInChain = State.get(getChainOp(), Part); | ||||
9526 | NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); | ||||
9527 | } | ||||
9528 | if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { | ||||
9529 | NextInChain = | ||||
9530 | createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), | ||||
9531 | NewRed, PrevInChain); | ||||
9532 | } else if (IsOrdered) | ||||
9533 | NextInChain = NewRed; | ||||
9534 | else { | ||||
9535 | NextInChain = State.Builder.CreateBinOp( | ||||
9536 | (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, | ||||
9537 | PrevInChain); | ||||
9538 | } | ||||
9539 | State.set(this, NextInChain, Part); | ||||
9540 | } | ||||
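// For illustration: when a condition operand is present (folded tail), lanes
// that are masked off are first replaced with the recurrence identity (e.g.
// zero for an add reduction) via the select above, so they do not affect the
// partial reduction of this part.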
9541 | } | ||||
9542 | |||||
9543 | void VPReplicateRecipe::execute(VPTransformState &State) { | ||||
9544 | if (State.Instance) { // Generate a single instance. | ||||
9545 | assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); | ||||
9546 | State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, | ||||
9547 | *State.Instance, IsPredicated, State); | ||||
9548 | // Insert scalar instance packing it into a vector. | ||||
9549 | if (AlsoPack && State.VF.isVector()) { | ||||
9550 | // If we're constructing lane 0, initialize to start from poison. | ||||
9551 | if (State.Instance->Lane.isFirstLane()) { | ||||
9552 | assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); | ||||
9553 | Value *Poison = PoisonValue::get( | ||||
9554 | VectorType::get(getUnderlyingValue()->getType(), State.VF)); | ||||
9555 | State.set(this, Poison, State.Instance->Part); | ||||
9556 | } | ||||
9557 | State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); | ||||
9558 | } | ||||
9559 | return; | ||||
9560 | } | ||||
9561 | |||||
9562 | // Generate scalar instances for all VF lanes of all UF parts, unless the | ||||
9563 | // instruction is uniform, in which case generate only the first lane for each | ||||
9564 | // of the UF parts. | ||||
9565 | unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); | ||||
9566 | assert((!State.VF.isScalable() || IsUniform) && | ||||
9567 | "Can't scalarize a scalable vector"); | ||||
9568 | for (unsigned Part = 0; Part < State.UF; ++Part) | ||||
9569 | for (unsigned Lane = 0; Lane < EndLane; ++Lane) | ||||
9570 | State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, | ||||
9571 | VPIteration(Part, Lane), IsPredicated, | ||||
9572 | State); | ||||
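// For illustration: with UF = 2 and VF = 4, a non-uniform replicate recipe
// emits 4 x 2 = 8 scalar copies of the instruction, while a uniform one emits
// only 2 copies (lane 0 of each part).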
9573 | } | ||||
9574 | |||||
9575 | void VPBranchOnMaskRecipe::execute(VPTransformState &State) { | ||||
9576 | assert(State.Instance && "Branch on Mask works only on single instance."); | ||||
9577 | |||||
9578 | unsigned Part = State.Instance->Part; | ||||
9579 | unsigned Lane = State.Instance->Lane.getKnownLane(); | ||||
9580 | |||||
9581 | Value *ConditionBit = nullptr; | ||||
9582 | VPValue *BlockInMask = getMask(); | ||||
9583 | if (BlockInMask) { | ||||
9584 | ConditionBit = State.get(BlockInMask, Part); | ||||
9585 | if (ConditionBit->getType()->isVectorTy()) | ||||
9586 | ConditionBit = State.Builder.CreateExtractElement( | ||||
9587 | ConditionBit, State.Builder.getInt32(Lane)); | ||||
9588 | } else // Block in mask is all-one. | ||||
9589 | ConditionBit = State.Builder.getTrue(); | ||||
9590 | |||||
9591 | // Replace the temporary unreachable terminator with a new conditional branch, | ||||
9592 | // whose two destinations will be set later when they are created. | ||||
9593 | auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); | ||||
9594 | assert(isa<UnreachableInst>(CurrentTerminator) && | ||||
9595 | "Expected to replace unreachable terminator with conditional branch."); | ||||
9596 | auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); | ||||
9597 | CondBr->setSuccessor(0, nullptr); | ||||
9598 | ReplaceInstWithInst(CurrentTerminator, CondBr); | ||||
9599 | } | ||||
9600 | |||||
9601 | void VPPredInstPHIRecipe::execute(VPTransformState &State) { | ||||
9602 | assert(State.Instance && "Predicated instruction PHI works per instance."); | ||||
9603 | Instruction *ScalarPredInst = | ||||
9604 | cast<Instruction>(State.get(getOperand(0), *State.Instance)); | ||||
9605 | BasicBlock *PredicatedBB = ScalarPredInst->getParent(); | ||||
9606 | BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); | ||||
9607 | assert(PredicatingBB && "Predicated block has no single predecessor."); | ||||
9608 | assert(isa<VPReplicateRecipe>(getOperand(0)) && | ||||
9609 | "operand must be VPReplicateRecipe"); | ||||
9610 | |||||
9611 | // By current pack/unpack logic we need to generate only a single phi node: if | ||||
9612 | // a vector value for the predicated instruction exists at this point it means | ||||
9613 | // the instruction has vector users only, and a phi for the vector value is | ||||
9614 | // needed. In this case the recipe of the predicated instruction is marked to | ||||
9615 | // also do that packing, thereby "hoisting" the insert-element sequence. | ||||
9616 | // Otherwise, a phi node for the scalar value is needed. | ||||
9617 | unsigned Part = State.Instance->Part; | ||||
9618 | if (State.hasVectorValue(getOperand(0), Part)) { | ||||
9619 | Value *VectorValue = State.get(getOperand(0), Part); | ||||
9620 | InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); | ||||
9621 | PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); | ||||
9622 | VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. | ||||
9623 | VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. | ||||
9624 | if (State.hasVectorValue(this, Part)) | ||||
9625 | State.reset(this, VPhi, Part); | ||||
9626 | else | ||||
9627 | State.set(this, VPhi, Part); | ||||
9628 | // NOTE: Currently we need to update the value of the operand, so the next | ||||
9629 | // predicated iteration inserts its generated value in the correct vector. | ||||
9630 | State.reset(getOperand(0), VPhi, Part); | ||||
9631 | } else { | ||||
9632 | Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); | ||||
9633 | PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); | ||||
9634 | Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), | ||||
9635 | PredicatingBB); | ||||
9636 | Phi->addIncoming(ScalarPredInst, PredicatedBB); | ||||
9637 | if (State.hasScalarValue(this, *State.Instance)) | ||||
9638 | State.reset(this, Phi, *State.Instance); | ||||
9639 | else | ||||
9640 | State.set(this, Phi, *State.Instance); | ||||
9641 | // NOTE: Currently we need to update the value of the operand, so the next | ||||
9642 | // predicated iteration inserts its generated value in the correct vector. | ||||
9643 | State.reset(getOperand(0), Phi, *State.Instance); | ||||
9644 | } | ||||
9645 | } | ||||
9646 | |||||
9647 | void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { | ||||
9648 | VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; | ||||
9649 | State.ILV->vectorizeMemoryInstruction( | ||||
9650 | &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), | ||||
9651 | StoredValue, getMask()); | ||||
9652 | } | ||||
9653 | |||||
9654 | // Determine how to lower the scalar epilogue, which depends on 1) optimising | ||||
9655 | // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing | ||||
9656 | // predication, and 4) a TTI hook that analyses whether the loop is suitable | ||||
9657 | // for predication. | ||||
9658 | static ScalarEpilogueLowering getScalarEpilogueLowering( | ||||
9659 | Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, | ||||
9660 | BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, | ||||
9661 | AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, | ||||
9662 | LoopVectorizationLegality &LVL) { | ||||
9663 | // 1) OptSize takes precedence over all other options, i.e. if this is set, | ||||
9664 | // don't look at hints or options, and don't request a scalar epilogue. | ||||
9665 | // (For PGSO, as shouldOptimizeForSize isn't currently accessible from | ||||
9666 | // LoopAccessInfo (due to code dependency and not being able to reliably get | ||||
9667 | // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection | ||||
9668 | // of strides in LoopAccessInfo::analyzeLoop() and vectorize without | ||||
9669 | // versioning when the vectorization is forced, unlike hasOptSize. So revert | ||||
9670 | // back to the old way and vectorize with versioning when forced. See D81345.) | ||||
9671 | if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, | ||||
9672 | PGSOQueryType::IRPass) && | ||||
9673 | Hints.getForce() != LoopVectorizeHints::FK_Enabled)) | ||||
9674 | return CM_ScalarEpilogueNotAllowedOptSize; | ||||
9675 | |||||
9676 | // 2) If set, obey the directives | ||||
9677 | if (PreferPredicateOverEpilogue.getNumOccurrences()) { | ||||
9678 | switch (PreferPredicateOverEpilogue) { | ||||
9679 | case PreferPredicateTy::ScalarEpilogue: | ||||
9680 | return CM_ScalarEpilogueAllowed; | ||||
9681 | case PreferPredicateTy::PredicateElseScalarEpilogue: | ||||
9682 | return CM_ScalarEpilogueNotNeededUsePredicate; | ||||
9683 | case PreferPredicateTy::PredicateOrDontVectorize: | ||||
9684 | return CM_ScalarEpilogueNotAllowedUsePredicate; | ||||
9685 | }; | ||||
9686 | } | ||||
9687 | |||||
9688 | // 3) If set, obey the hints | ||||
9689 | switch (Hints.getPredicate()) { | ||||
9690 | case LoopVectorizeHints::FK_Enabled: | ||||
9691 | return CM_ScalarEpilogueNotNeededUsePredicate; | ||||
9692 | case LoopVectorizeHints::FK_Disabled: | ||||
9693 | return CM_ScalarEpilogueAllowed; | ||||
9694 | }; | ||||
9695 | |||||
9696 | // 4) if the TTI hook indicates this is profitable, request predication. | ||||
9697 | if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, | ||||
9698 | LVL.getLAI())) | ||||
9699 | return CM_ScalarEpilogueNotNeededUsePredicate; | ||||
9700 | |||||
9701 | return CM_ScalarEpilogueAllowed; | ||||
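// For illustration of the precedence above: a function carrying the optsize
// attribute always gets CM_ScalarEpilogueNotAllowedOptSize, even if
// PreferPredicateOverEpilogue is set to PredicateOrDontVectorize; otherwise
// the command-line option overrides the loop hint, which in turn overrides
// the TTI preference.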
9702 | } | ||||
9703 | |||||
9704 | Value *VPTransformState::get(VPValue *Def, unsigned Part) { | ||||
9705 | // If Values have been set for this Def return the one relevant for \p Part. | ||||
9706 | if (hasVectorValue(Def, Part)) | ||||
9707 | return Data.PerPartOutput[Def][Part]; | ||||
9708 | |||||
9709 | if (!hasScalarValue(Def, {Part, 0})) { | ||||
9710 | Value *IRV = Def->getLiveInIRValue(); | ||||
9711 | Value *B = ILV->getBroadcastInstrs(IRV); | ||||
9712 | set(Def, B, Part); | ||||
9713 | return B; | ||||
9714 | } | ||||
9715 | |||||
9716 | Value *ScalarValue = get(Def, {Part, 0}); | ||||
9717 | // If we aren't vectorizing, we can just copy the scalar map values over | ||||
9718 | // to the vector map. | ||||
9719 | if (VF.isScalar()) { | ||||
9720 | set(Def, ScalarValue, Part); | ||||
9721 | return ScalarValue; | ||||
9722 | } | ||||
9723 | |||||
9724 | auto *RepR = dyn_cast<VPReplicateRecipe>(Def); | ||||
9725 | bool IsUniform = RepR && RepR->isUniform(); | ||||
9726 | |||||
9727 | unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; | ||||
9728 | // Check if there is a scalar value for the selected lane. | ||||
9729 | if (!hasScalarValue(Def, {Part, LastLane})) { | ||||
9730 | // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. | ||||
9731 | assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && | ||||
9732 | "unexpected recipe found to be invariant"); | ||||
9733 | IsUniform = true; | ||||
9734 | LastLane = 0; | ||||
9735 | } | ||||
9736 | |||||
9737 | auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); | ||||
9738 | // Set the insert point after the last scalarized instruction or after the | ||||
9739 | // last PHI, if LastInst is a PHI. This ensures the insertelement sequence | ||||
9740 | // will directly follow the scalar definitions. | ||||
9741 | auto OldIP = Builder.saveIP(); | ||||
9742 | auto NewIP = | ||||
9743 | isa<PHINode>(LastInst) | ||||
9744 | ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) | ||||
9745 | : std::next(BasicBlock::iterator(LastInst)); | ||||
9746 | Builder.SetInsertPoint(&*NewIP); | ||||
9747 | |||||
9748 | // However, if we are vectorizing, we need to construct the vector values. | ||||
9749 | // If the value is known to be uniform after vectorization, we can just | ||||
9750 | // broadcast the scalar value corresponding to lane zero for each unroll | ||||
9751 | // iteration. Otherwise, we construct the vector values using | ||||
9752 | // insertelement instructions. Since the resulting vectors are stored in | ||||
9753 | // State, we will only generate the insertelements once. | ||||
9754 | Value *VectorValue = nullptr; | ||||
9755 | if (IsUniform) { | ||||
9756 | VectorValue = ILV->getBroadcastInstrs(ScalarValue); | ||||
9757 | set(Def, VectorValue, Part); | ||||
9758 | } else { | ||||
9759 | // Initialize packing with insertelements to start from undef. | ||||
9760 | assert(!VF.isScalable() && "VF is assumed to be non scalable."); | ||||
9761 | Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); | ||||
9762 | set(Def, Undef, Part); | ||||
9763 | for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) | ||||
9764 | ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); | ||||
9765 | VectorValue = get(Def, Part); | ||||
9766 | } | ||||
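// For illustration: at this point VectorValue is either a broadcast of the
// lane-0 scalar (uniform case) or has been assembled lane by lane with
// insertelement; since it was stored with set(Def, ..., Part), a later
// get(Def, Part) for the same part returns it directly from the per-part map.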
9767 | Builder.restoreIP(OldIP); | ||||
9768 | return VectorValue; | ||||
9769 | } | ||||
9770 | |||||
9771 | // Process the loop in the VPlan-native vectorization path. This path builds | ||||
9772 | // VPlan upfront in the vectorization pipeline, which allows applying | ||||
9773 | // VPlan-to-VPlan transformations from the very beginning without modifying the | ||||
9774 | // input LLVM IR. | ||||
9775 | static bool processLoopInVPlanNativePath( | ||||
9776 | Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, | ||||
9777 | LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, | ||||
9778 | TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, | ||||
9779 | OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, | ||||
9780 | ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, | ||||
9781 | LoopVectorizationRequirements &Requirements) { | ||||
9782 | |||||
9783 | if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { | ||||
9784 | LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); | ||||
9785 | return false; | ||||
9786 | } | ||||
9787 | assert(EnableVPlanNativePath && "VPlan-native path is disabled."); | ||||
9788 | Function *F = L->getHeader()->getParent(); | ||||
9789 | InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); | ||||
9790 | |||||
9791 | ScalarEpilogueLowering SEL = getScalarEpilogueLowering( | ||||
9792 | F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); | ||||
9793 | |||||
9794 | LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, | ||||
9795 | &Hints, IAI); | ||||
9796 | // Use the planner for outer loop vectorization. | ||||
9797 | // TODO: CM is not used at this point inside the planner. Turn CM into an | ||||
9798 | // optional argument if we don't need it in the future. | ||||
9799 | LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, | ||||
9800 | Requirements, ORE); | ||||
9801 | |||||
9802 | // Get user vectorization factor. | ||||
9803 | ElementCount UserVF = Hints.getWidth(); | ||||
9804 | |||||
9805 | // Plan how to best vectorize, return the best VF and its cost. | ||||
9806 | const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); | ||||
9807 | |||||
9808 | // If we are stress testing VPlan builds, do not attempt to generate vector | ||||
9809 | // code. Masked vector code generation support will follow soon. | ||||
9810 | // Also, do not attempt to vectorize if no vector code will be produced. | ||||
9811 | if (VPlanBuildStressTest || EnableVPlanPredication || | ||||
9812 | VectorizationFactor::Disabled() == VF) | ||||
9813 | return false; | ||||
9814 | |||||
9815 | LVP.setBestPlan(VF.Width, 1); | ||||
9816 | |||||
9817 | { | ||||
9818 | GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, | ||||
9819 | F->getParent()->getDataLayout()); | ||||
9820 | InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, | ||||
9821 | &CM, BFI, PSI, Checks); | ||||
9822 | LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" | ||||
9823 | << L->getHeader()->getParent()->getName() << "\"\n"); | ||||
9824 | LVP.executePlan(LB, DT); | ||||
9825 | } | ||||
9826 | |||||
9827 | // Mark the loop as already vectorized to avoid vectorizing again. | ||||
9828 | Hints.setAlreadyVectorized(); | ||||
9829 | assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); | ||||
9830 | return true; | ||||
9831 | } | ||||
9832 | |||||
9833 | // Emit a remark if there are stores to floats that required a floating point | ||||
9834 | // extension. If the vectorized loop was generated with mixed floating point | ||||
9835 | // precision, there will be a performance penalty from the conversion overhead | ||||
9836 | // and the change in the vector width. | ||||
9837 | static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { | ||||
9838 | SmallVector<Instruction *, 4> Worklist; | ||||
9839 | for (BasicBlock *BB : L->getBlocks()) { | ||||
9840 | for (Instruction &Inst : *BB) { | ||||
9841 | if (auto *S = dyn_cast<StoreInst>(&Inst)) { | ||||
9842 | if (S->getValueOperand()->getType()->isFloatTy()) | ||||
9843 | Worklist.push_back(S); | ||||
9844 | } | ||||
9845 | } | ||||
9846 | } | ||||
9847 | |||||
9848 | // Traverse upwards from the floating point stores, searching for floating | ||||
9849 | // point conversions. | ||||
9850 | SmallPtrSet<const Instruction *, 4> Visited; | ||||
9851 | SmallPtrSet<const Instruction *, 4> EmittedRemark; | ||||
9852 | while (!Worklist.empty()) { | ||||
9853 | auto *I = Worklist.pop_back_val(); | ||||
9854 | if (!L->contains(I)) | ||||
9855 | continue; | ||||
9856 | if (!Visited.insert(I).second) | ||||
9857 | continue; | ||||
9858 | |||||
9859 | // Emit a remark if the floating point store required a floating | ||||
9860 | // point conversion. | ||||
9861 | // TODO: More work could be done to identify the root cause such as a | ||||
9862 | // constant or a function return type and point the user to it. | ||||
9863 | if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) | ||||
9864 | ORE->emit([&]() { | ||||
9865 | return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", | ||||
9866 | I->getDebugLoc(), L->getHeader()) | ||||
9867 | << "floating point conversion changes vector width. " | ||||
9868 | << "Mixed floating point precision requires an up/down " | ||||
9869 | << "cast that will negatively impact performance."; | ||||
9870 | }); | ||||
9871 | |||||
9872 | for (Use &Op : I->operands()) | ||||
9873 | if (auto *OpI = dyn_cast<Instruction>(Op)) | ||||
9874 | Worklist.push_back(OpI); | ||||
9875 | } | ||||
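// For illustration (hypothetical source): with float arrays A and B,
//   A[i] = B[i] + 1.0;   // 1.0 is a double constant
// the addition is performed in double precision, so the chain feeding the
// float store contains an fpext of B[i]; walking up from the store finds that
// fpext and emits the VectorMixedPrecision remark.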
9876 | } | ||||
9877 | |||||
9878 | LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) | ||||
9879 | : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || | ||||
9880 | !EnableLoopInterleaving), | ||||
9881 | VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || | ||||
9882 | !EnableLoopVectorization) {} | ||||
9883 | |||||
9884 | bool LoopVectorizePass::processLoop(Loop *L) { | ||||
9885 | assert((EnableVPlanNativePath || L->isInnermost()) && | ||||
9886 | "VPlan-native path is not enabled. Only process inner loops."); | ||||
9887 | |||||
9888 | #ifndef NDEBUG | ||||
9889 | const std::string DebugLocStr = getDebugLocString(L); | ||||
9890 | #endif /* NDEBUG */ | ||||
9891 | |||||
9892 | LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "\nLV: Checking a loop in \"" << L->getHeader()->getParent()->getName() << "\" from " << DebugLocStr << "\n"; } } while (false ) | ||||
9893 | << L->getHeader()->getParent()->getName() << "\" from "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "\nLV: Checking a loop in \"" << L->getHeader()->getParent()->getName() << "\" from " << DebugLocStr << "\n"; } } while (false ) | ||||
9894 | << DebugLocStr << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "\nLV: Checking a loop in \"" << L->getHeader()->getParent()->getName() << "\" from " << DebugLocStr << "\n"; } } while (false ); | ||||
9895 | |||||
9896 | LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); | ||||
9897 | |||||
9898 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9899 | dbgs() << "LV: Loop hints:"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9900 | << " force="do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9901 | << (Hints.getForce() == LoopVectorizeHints::FK_Disableddo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9902 | ? "disabled"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9903 | : (Hints.getForce() == LoopVectorizeHints::FK_Enableddo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9904 | ? "enabled"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9905 | : "?"))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9906 | << " width=" << Hints.getWidth()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false) | ||||
9907 | << " interleave=" << Hints.getInterleave() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints:" << " force=" << (Hints.getForce() == LoopVectorizeHints:: FK_Disabled ? "disabled" : (Hints.getForce() == LoopVectorizeHints ::FK_Enabled ? "enabled" : "?")) << " width=" << Hints .getWidth() << " interleave=" << Hints.getInterleave () << "\n"; } } while (false); | ||||
9908 | |||||
9909 | // Function containing loop | ||||
9910 | Function *F = L->getHeader()->getParent(); | ||||
9911 | |||||
9912 | // Looking at the diagnostic output is the only way to determine if a loop | ||||
9913 | // was vectorized (other than looking at the IR or machine code), so it | ||||
9914 | // is important to generate an optimization remark for each loop. Most of | ||||
9915 | // these messages are generated as OptimizationRemarkAnalysis. Remarks | ||||
9916 | // generated as OptimizationRemark and OptimizationRemarkMissed are | ||||
9917 | // less verbose reporting vectorized loops and unvectorized loops that may | ||||
9918 | // benefit from vectorization, respectively. | ||||
9919 | |||||
9920 | if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { | ||||
9921 | LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Loop hints prevent vectorization.\n" ; } } while (false); | ||||
9922 | return false; | ||||
9923 | } | ||||
9924 | |||||
9925 | PredicatedScalarEvolution PSE(*SE, *L); | ||||
9926 | |||||
9927 | // Check if it is legal to vectorize the loop. | ||||
9928 | LoopVectorizationRequirements Requirements; | ||||
9929 | LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, | ||||
9930 | &Requirements, &Hints, DB, AC, BFI, PSI); | ||||
9931 | if (!LVL.canVectorize(EnableVPlanNativePath)) { | ||||
9932 | LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Not vectorizing: Cannot prove legality.\n" ; } } while (false); | ||||
9933 | Hints.emitRemarkWithHints(); | ||||
9934 | return false; | ||||
9935 | } | ||||
9936 | |||||
9937 | // Check the function attributes and profiles to find out if this function | ||||
9938 | // should be optimized for size. | ||||
9939 | ScalarEpilogueLowering SEL = getScalarEpilogueLowering( | ||||
9940 | F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); | ||||
9941 | |||||
9942 | // Entrance to the VPlan-native vectorization path. Outer loops are processed | ||||
9943 | // here. They may require CFG and instruction level transformations before | ||||
9944 | // even evaluating whether vectorization is profitable. Since we cannot modify | ||||
9945 | // the incoming IR, we need to build VPlan upfront in the vectorization | ||||
9946 | // pipeline. | ||||
9947 | if (!L->isInnermost()) | ||||
9948 | return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, | ||||
9949 | ORE, BFI, PSI, Hints, Requirements); | ||||
9950 | |||||
9951 | assert(L->isInnermost() && "Inner loop expected.")(static_cast <bool> (L->isInnermost() && "Inner loop expected." ) ? void (0) : __assert_fail ("L->isInnermost() && \"Inner loop expected.\"" , "/build/llvm-toolchain-snapshot-13~++20210626100611+81b2f95971ed/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp" , 9951, __extension__ __PRETTY_FUNCTION__)); | ||||
9952 | |||||
9953 | // Check the loop for a trip count threshold: vectorize loops with a tiny trip | ||||
9954 | // count by optimizing for size, to minimize overheads. | ||||
9955 | auto ExpectedTC = getSmallBestKnownTC(*SE, L); | ||||
9956 | if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { | ||||
9957 | LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Found a loop with a very small trip count. " << "This loop is worth vectorizing only if no scalar " << "iteration overheads are incurred."; } } while (false ) | ||||
9958 | << "This loop is worth vectorizing only if no scalar "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Found a loop with a very small trip count. " << "This loop is worth vectorizing only if no scalar " << "iteration overheads are incurred."; } } while (false ) | ||||
9959 | << "iteration overheads are incurred.")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "LV: Found a loop with a very small trip count. " << "This loop is worth vectorizing only if no scalar " << "iteration overheads are incurred."; } } while (false ); | ||||
9960 | if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) | ||||
9961 | LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << " But vectorizing was explicitly forced.\n" ; } } while (false); | ||||
9962 | else { | ||||
9963 | LLVM_DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-vectorize")) { dbgs() << "\n"; } } while (false); | ||||
9964 | SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; | ||||
9965 | } | ||||
9966 | } | ||||
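
  // Added note (illustrative, not original commentary): getSmallBestKnownTC
  // returns either an exact constant trip count or a small profile-derived
  // estimate. For a hypothetical threshold of 16, a loop known to execute
  // only 8 times still reaches the cost model, but with
  // CM_ScalarEpilogueNotAllowedLowTripLoop it must either fold the tail into
  // a predicated vector body or refuse to vectorize, rather than split the 8
  // iterations into a vector part plus a scalar remainder loop.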

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }
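
  // Added note (hedged): the remarks constructed in this function are what
  // users see with the standard remark flags, e.g.
  //
  //   clang -O2 -Rpass=loop-vectorize -Rpass-missed=loop-vectorize \
  //         -Rpass-analysis=loop-vectorize file.c
  //
  // A successful vectorization typically reports something like
  //   remark: vectorized loop (vectorization width: 4, interleaved count: 2)
  // while the OptimizationRemarkMissed/Analysis cases above explain why a
  // loop was left alone.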

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to not
    // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector codegeneration is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
    LVP.setBestPlan(VF.Width, IC);

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not legal to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);
      LVP.executePlan(Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *legal* to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar epilogue
        // to be vectorized by executing the plan (potentially with a different
        // factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                          EpilogueVF.Width.getKnownMinValue(),
                                          1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
        LVP.executePlan(MainILV, DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);
        LVP.executePlan(EpilogILV, DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);
        LVP.executePlan(LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that is
        // rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}
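
// Added note (sketch, assuming the followup metadata described in
// llvm/docs/TransformMetadata.rst): a loop can steer what happens to the
// remainder loop left behind by vectorization, e.g.
//
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.vectorize.followup_epilogue", !2}
//   !2 = !{!"llvm.loop.unroll.disable"}
//
// When such metadata is present, makeFollowupLoopID returns it and it is
// attached verbatim at the end of processLoop above; otherwise the pass falls
// back to its defaults (optionally disabling runtime unrolling and marking
// the loop as already vectorized).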

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
1 | //===- TypeSize.h - Wrapper around type sizes -------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file provides a struct that can be used to query the size of IR types |
10 | // which may be scalable vectors. It provides convenience operators so that |
11 | // it can be used in much the same way as a single scalar value. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_SUPPORT_TYPESIZE_H |
16 | #define LLVM_SUPPORT_TYPESIZE_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/Support/MathExtras.h" |
20 | #include "llvm/Support/WithColor.h" |
21 | |
22 | #include <algorithm> |
23 | #include <array> |
24 | #include <cassert> |
25 | #include <cstdint> |
26 | #include <type_traits> |
27 | |
28 | namespace llvm { |
29 | |
30 | /// Reports a diagnostic message to indicate an invalid size request has been |
31 | /// done on a scalable vector. This function may not return. |
32 | void reportInvalidSizeRequest(const char *Msg); |
33 | |
34 | template <typename LeafTy> struct LinearPolyBaseTypeTraits {}; |
35 | |
36 | //===----------------------------------------------------------------------===// |
37 | // LinearPolyBase - a base class for linear polynomials with multiple |
38 | // dimensions. This can e.g. be used to describe offsets that have both a
39 | // fixed and scalable component. |
40 | //===----------------------------------------------------------------------===// |
41 | |
42 | /// LinearPolyBase describes a linear polynomial: |
43 | /// c0 * scale0 + c1 * scale1 + ... + cK * scaleK |
44 | /// where the scale is implicit, so only the coefficients are encoded. |
45 | template <typename LeafTy> |
46 | class LinearPolyBase { |
47 | public: |
48 | using ScalarTy = typename LinearPolyBaseTypeTraits<LeafTy>::ScalarTy; |
49 | static constexpr auto Dimensions = LinearPolyBaseTypeTraits<LeafTy>::Dimensions; |
50 | static_assert(Dimensions != std::numeric_limits<unsigned>::max(), |
51 | "Dimensions out of range"); |
52 | |
53 | private: |
54 | std::array<ScalarTy, Dimensions> Coefficients; |
55 | |
56 | protected: |
57 | LinearPolyBase(ArrayRef<ScalarTy> Values) { |
58 | std::copy(Values.begin(), Values.end(), Coefficients.begin()); |
59 | } |
60 | |
61 | public: |
62 | friend LeafTy &operator+=(LeafTy &LHS, const LeafTy &RHS) { |
63 | for (unsigned I=0; I<Dimensions; ++I) |
64 | LHS.Coefficients[I] += RHS.Coefficients[I]; |
65 | return LHS; |
66 | } |
67 | |
68 | friend LeafTy &operator-=(LeafTy &LHS, const LeafTy &RHS) { |
69 | for (unsigned I=0; I<Dimensions; ++I) |
70 | LHS.Coefficients[I] -= RHS.Coefficients[I]; |
71 | return LHS; |
72 | } |
73 | |
74 | friend LeafTy &operator*=(LeafTy &LHS, ScalarTy RHS) { |
75 | for (auto &C : LHS.Coefficients) |
76 | C *= RHS; |
77 | return LHS; |
78 | } |
79 | |
80 | friend LeafTy operator+(const LeafTy &LHS, const LeafTy &RHS) { |
81 | LeafTy Copy = LHS; |
82 | return Copy += RHS; |
83 | } |
84 | |
85 | friend LeafTy operator-(const LeafTy &LHS, const LeafTy &RHS) { |
86 | LeafTy Copy = LHS; |
87 | return Copy -= RHS; |
88 | } |
89 | |
90 | friend LeafTy operator*(const LeafTy &LHS, ScalarTy RHS) { |
91 | LeafTy Copy = LHS; |
92 | return Copy *= RHS; |
93 | } |
94 | |
95 | template <typename U = ScalarTy> |
96 | friend typename std::enable_if_t<std::is_signed<U>::value, LeafTy> |
97 | operator-(const LeafTy &LHS) { |
98 | LeafTy Copy = LHS; |
99 | return Copy *= -1; |
100 | } |
101 | |
102 | bool operator==(const LinearPolyBase &RHS) const { |
103 | return std::equal(Coefficients.begin(), Coefficients.end(), |
104 | RHS.Coefficients.begin()); |
105 | } |
106 | |
107 | bool operator!=(const LinearPolyBase &RHS) const { |
108 | return !(*this == RHS); |
109 | } |
110 | |
111 | bool isZero() const { |
112 | return all_of(Coefficients, [](const ScalarTy &C) { return C == 0; }); |
113 | } |
114 | bool isNonZero() const { return !isZero(); } |
115 | explicit operator bool() const { return isNonZero(); } |
116 | |
117 | ScalarTy getValue(unsigned Dim) const { return Coefficients[Dim]; } |
118 | }; |
119 | |
120 | //===----------------------------------------------------------------------===// |
121 | // StackOffset - Represent an offset with named fixed and scalable components. |
122 | //===----------------------------------------------------------------------===// |
123 | |
124 | class StackOffset; |
125 | template <> struct LinearPolyBaseTypeTraits<StackOffset> { |
126 | using ScalarTy = int64_t; |
127 | static constexpr unsigned Dimensions = 2; |
128 | }; |
129 | |
130 | /// StackOffset is a class to represent an offset with 2 dimensions, |
131 | /// named fixed and scalable, respectively. This class allows a value for both |
132 | /// dimensions to depict e.g. "8 bytes and 16 scalable bytes", which is needed |
133 | /// to represent stack offsets. |
134 | class StackOffset : public LinearPolyBase<StackOffset> { |
135 | protected: |
136 | StackOffset(ScalarTy Fixed, ScalarTy Scalable) |
137 | : LinearPolyBase<StackOffset>({Fixed, Scalable}) {} |
138 | |
139 | public: |
140 | StackOffset() : StackOffset({0, 0}) {} |
141 | StackOffset(const LinearPolyBase<StackOffset> &Other) |
142 | : LinearPolyBase<StackOffset>(Other) {} |
143 | static StackOffset getFixed(ScalarTy Fixed) { return {Fixed, 0}; } |
144 | static StackOffset getScalable(ScalarTy Scalable) { return {0, Scalable}; } |
145 | static StackOffset get(ScalarTy Fixed, ScalarTy Scalable) { |
146 | return {Fixed, Scalable}; |
147 | } |
148 | |
149 | ScalarTy getFixed() const { return this->getValue(0); } |
150 | ScalarTy getScalable() const { return this->getValue(1); } |
151 | }; |
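
// Usage sketch (illustrative, not part of the original header): offsets with
// fixed and scalable parts compose component-wise, e.g.
//
//   StackOffset Off = StackOffset::getFixed(8) + StackOffset::getScalable(16);
//   int64_t F = Off.getFixed();    // 8
//   int64_t S = Off.getScalable(); // 16 (scaled by vscale at runtime)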
152 | |
153 | //===----------------------------------------------------------------------===// |
154 | // UnivariateLinearPolyBase - a base class for linear polynomials with multiple |
155 | // dimensions, but where only one dimension can be set at any time. |
156 | // This can e.g. be used to describe sizes that are either fixed or scalable. |
157 | //===----------------------------------------------------------------------===// |
158 | |
159 | /// UnivariateLinearPolyBase is a base class for ElementCount and TypeSize. |
160 | /// Like LinearPolyBase it tries to represent a linear polynomial |
161 | /// where only one dimension can be set at any time, e.g. |
162 | /// 0 * scale0 + 0 * scale1 + ... + cJ * scaleJ + ... + 0 * scaleK |
163 | /// The dimension that is set is the univariate dimension. |
164 | template <typename LeafTy> |
165 | class UnivariateLinearPolyBase { |
166 | public: |
167 | using ScalarTy = typename LinearPolyBaseTypeTraits<LeafTy>::ScalarTy; |
168 | static constexpr auto Dimensions = LinearPolyBaseTypeTraits<LeafTy>::Dimensions; |
169 | static_assert(Dimensions != std::numeric_limits<unsigned>::max(), |
170 | "Dimensions out of range"); |
171 | |
172 | protected: |
173 | ScalarTy Value; // The value at the univariate dimension.
174 | unsigned UnivariateDim; // The univariate dimension.
175 | |
176 | UnivariateLinearPolyBase(ScalarTy Val, unsigned UnivariateDim) |
177 | : Value(Val), UnivariateDim(UnivariateDim) { |
178 | assert(UnivariateDim < Dimensions && "Dimension out of range");
179 | } |
180 | |
181 | friend LeafTy &operator+=(LeafTy &LHS, const LeafTy &RHS) { |
182 | assert(LHS.UnivariateDim == RHS.UnivariateDim && "Invalid dimensions");
183 | LHS.Value += RHS.Value; |
184 | return LHS; |
185 | } |
186 | |
187 | friend LeafTy &operator-=(LeafTy &LHS, const LeafTy &RHS) { |
188 | assert(LHS.UnivariateDim == RHS.UnivariateDim && "Invalid dimensions");
189 | LHS.Value -= RHS.Value; |
190 | return LHS; |
191 | } |
192 | |
193 | friend LeafTy &operator*=(LeafTy &LHS, ScalarTy RHS) { |
194 | LHS.Value *= RHS; |
195 | return LHS; |
196 | } |
197 | |
198 | friend LeafTy operator+(const LeafTy &LHS, const LeafTy &RHS) { |
199 | LeafTy Copy = LHS; |
200 | return Copy += RHS; |
201 | } |
202 | |
203 | friend LeafTy operator-(const LeafTy &LHS, const LeafTy &RHS) { |
204 | LeafTy Copy = LHS; |
205 | return Copy -= RHS; |
206 | } |
207 | |
208 | friend LeafTy operator*(const LeafTy &LHS, ScalarTy RHS) { |
209 | LeafTy Copy = LHS; |
210 | return Copy *= RHS; |
211 | } |
212 | |
213 | template <typename U = ScalarTy> |
214 | friend typename std::enable_if<std::is_signed<U>::value, LeafTy>::type |
215 | operator-(const LeafTy &LHS) { |
216 | LeafTy Copy = LHS; |
217 | return Copy *= -1; |
218 | } |
219 | |
220 | public: |
221 | bool operator==(const UnivariateLinearPolyBase &RHS) const { |
222 | return Value == RHS.Value && UnivariateDim == RHS.UnivariateDim; |
223 | } |
224 | |
225 | bool operator!=(const UnivariateLinearPolyBase &RHS) const { |
226 | return !(*this == RHS); |
227 | } |
228 | |
229 | bool isZero() const { return !Value; } |
230 | bool isNonZero() const { return !isZero(); } |
231 | explicit operator bool() const { return isNonZero(); } |
232 | ScalarTy getValue() const { return Value; } |
233 | ScalarTy getValue(unsigned Dim) const { |
234 | return Dim == UnivariateDim ? Value : 0; |
235 | } |
236 | |
237 | /// Add \p RHS to the value at the univariate dimension. |
238 | LeafTy getWithIncrement(ScalarTy RHS) const { |
239 | return static_cast<LeafTy>( |
240 | UnivariateLinearPolyBase(Value + RHS, UnivariateDim)); |
241 | } |
242 | |
243 | /// Subtract \p RHS from the value at the univariate dimension. |
244 | LeafTy getWithDecrement(ScalarTy RHS) const { |
245 | return static_cast<LeafTy>( |
246 | UnivariateLinearPolyBase(Value - RHS, UnivariateDim)); |
247 | } |
248 | }; |
249 | |
250 | |
251 | //===----------------------------------------------------------------------===// |
252 | // LinearPolySize - base class for fixed- or scalable sizes. |
253 | // ^ ^ |
254 | // | | |
255 | // | +----- ElementCount - Leaf class to represent an element count |
256 | // | (vscale x unsigned) |
257 | // | |
258 | // +-------- TypeSize - Leaf class to represent a type size |
259 | // (vscale x uint64_t) |
260 | //===----------------------------------------------------------------------===// |
261 | |
262 | /// LinearPolySize is a base class to represent sizes. It is either |
263 | /// fixed-sized or it is scalable-sized, but it cannot be both. |
264 | template <typename LeafTy> |
265 | class LinearPolySize : public UnivariateLinearPolyBase<LeafTy> { |
266 | // Make the parent class a friend, so that it can access the protected |
268 | // conversion/copy-constructor for UnivariateLinearPolyBase<LeafTy> ->
268 | // LinearPolySize<LeafTy>. |
269 | friend class UnivariateLinearPolyBase<LeafTy>; |
270 | |
271 | public: |
272 | using ScalarTy = typename UnivariateLinearPolyBase<LeafTy>::ScalarTy; |
273 | enum Dims : unsigned { FixedDim = 0, ScalableDim = 1 }; |
274 | |
275 | protected: |
276 | LinearPolySize(ScalarTy MinVal, Dims D) |
277 | : UnivariateLinearPolyBase<LeafTy>(MinVal, D) {} |
278 | |
279 | LinearPolySize(const UnivariateLinearPolyBase<LeafTy> &V) |
280 | : UnivariateLinearPolyBase<LeafTy>(V) {} |
281 | |
282 | public: |
283 | |
284 | static LeafTy getFixed(ScalarTy MinVal) { |
285 | return static_cast<LeafTy>(LinearPolySize(MinVal, FixedDim)); |
286 | } |
287 | static LeafTy getScalable(ScalarTy MinVal) { |
288 | return static_cast<LeafTy>(LinearPolySize(MinVal, ScalableDim)); |
289 | } |
290 | static LeafTy get(ScalarTy MinVal, bool Scalable) { |
291 | return static_cast<LeafTy>( |
292 | LinearPolySize(MinVal, Scalable ? ScalableDim : FixedDim)); |
293 | } |
294 | static LeafTy getNull() { return get(0, false); } |
295 | |
296 | /// Returns the minimum value this size can represent. |
297 | ScalarTy getKnownMinValue() const { return this->getValue(); } |
298 | /// Returns whether the size is scaled by a runtime quantity (vscale). |
299 | bool isScalable() const { return this->UnivariateDim == ScalableDim; } |
300 | /// A return value of true indicates we know at compile time that the number |
301 | /// of elements (vscale * Min) is definitely even. However, returning false |
302 | /// does not guarantee that the total number of elements is odd. |
303 | bool isKnownEven() const { return (getKnownMinValue() & 0x1) == 0; } |
304 | /// This function tells the caller whether the element count is known at |
305 | /// compile time to be a multiple of the scalar value RHS. |
306 | bool isKnownMultipleOf(ScalarTy RHS) const { |
307 | return getKnownMinValue() % RHS == 0; |
308 | } |
309 | |
310 | // Return the minimum value with the assumption that the count is exact. |
311 | // Use in places where a scalable count doesn't make sense (e.g. non-vector |
312 | // types, or vectors in backends which don't support scalable vectors). |
313 | ScalarTy getFixedValue() const { |
314 | assert(!isScalable() &&
315 |        "Request for a fixed element count on a scalable object");
316 | return getKnownMinValue(); |
317 | } |
318 | |
319 | // For some cases, size ordering between scalable and fixed size types cannot |
320 | // be determined at compile time, so such comparisons aren't allowed. |
321 | // |
322 | // e.g. <vscale x 2 x i16> could be bigger than <4 x i32> with a runtime |
323 | // vscale >= 5, equal sized with a vscale of 4, and smaller with |
324 | // a vscale <= 3. |
325 | // |
326 | // All the functions below make use of the fact vscale is always >= 1, which |
327 | // means that <vscale x 4 x i32> is guaranteed to be >= <4 x i32>, etc. |
328 | |
329 | static bool isKnownLT(const LinearPolySize &LHS, const LinearPolySize &RHS) { |
330 | if (!LHS.isScalable() || RHS.isScalable()) |
331 | return LHS.getKnownMinValue() < RHS.getKnownMinValue(); |
332 | return false; |
333 | } |
334 | |
335 | static bool isKnownGT(const LinearPolySize &LHS, const LinearPolySize &RHS) { |
336 | if (LHS.isScalable() || !RHS.isScalable()) |
337 | return LHS.getKnownMinValue() > RHS.getKnownMinValue(); |
338 | return false; |
339 | } |
340 | |
341 | static bool isKnownLE(const LinearPolySize &LHS, const LinearPolySize &RHS) { |
342 | if (!LHS.isScalable() || RHS.isScalable()) |
343 | return LHS.getKnownMinValue() <= RHS.getKnownMinValue(); |
344 | return false; |
345 | } |
346 | |
347 | static bool isKnownGE(const LinearPolySize &LHS, const LinearPolySize &RHS) { |
348 | if (LHS.isScalable() || !RHS.isScalable()) |
349 | return LHS.getKnownMinValue() >= RHS.getKnownMinValue(); |
350 | return false; |
351 | } |
352 | |
353 | /// We do not provide the '/' operator here because division for polynomial |
354 | /// types does not work in the same way as for normal integer types. We can |
355 | /// only divide the minimum value (or coefficient) by RHS, which is not the |
356 | /// same as |
357 | /// (Min * Vscale) / RHS |
358 | /// The caller is recommended to use this function in combination with |
359 | /// isKnownMultipleOf(RHS), which lets the caller know if it's possible to |
360 | /// perform a lossless divide by RHS. |
361 | LeafTy divideCoefficientBy(ScalarTy RHS) const { |
362 | return static_cast<LeafTy>( |
363 | LinearPolySize::get(getKnownMinValue() / RHS, isScalable())); |
364 | } |
365 | |
366 | LeafTy coefficientNextPowerOf2() const { |
367 | return static_cast<LeafTy>(LinearPolySize::get( |
368 | static_cast<ScalarTy>(llvm::NextPowerOf2(getKnownMinValue())), |
369 | isScalable())); |
370 | } |
371 | |
372 | /// Printing function. |
373 | void print(raw_ostream &OS) const { |
374 | if (isScalable()) |
375 | OS << "vscale x "; |
376 | OS << getKnownMinValue(); |
377 | } |
378 | }; |
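
// Usage sketch (illustrative, not part of the original header): the isKnown*
// predicates only rely on vscale >= 1 and are otherwise conservative:
//
//   ElementCount F4 = ElementCount::getFixed(4);
//   ElementCount S4 = ElementCount::getScalable(4);
//   ElementCount::isKnownLE(F4, S4); // true: 4 <= vscale * 4 for any vscale
//   ElementCount::isKnownGE(F4, S4); // false: vscale may be greater than 1
//
// divideCoefficientBy only divides the known minimum, so callers typically
// guard it with isKnownMultipleOf:
//
//   if (F4.isKnownMultipleOf(2))
//     ElementCount Half = F4.divideCoefficientBy(2); // fixed 2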
379 | |
380 | class ElementCount; |
381 | template <> struct LinearPolyBaseTypeTraits<ElementCount> { |
382 | using ScalarTy = unsigned; |
383 | static constexpr unsigned Dimensions = 2; |
384 | }; |
385 | |
386 | class ElementCount : public LinearPolySize<ElementCount> { |
387 | public: |
388 | ElementCount() : LinearPolySize(LinearPolySize::getNull()) {} |
389 | |
390 | ElementCount(const LinearPolySize<ElementCount> &V) : LinearPolySize(V) {} |
391 | |
392 | /// Counting predicates. |
393 | /// |
394 | ///@{ Number of elements.
395 | /// Exactly one element. |
396 | bool isScalar() const { return !isScalable() && getKnownMinValue() == 1; } |
397 | /// One or more elements. |
398 | bool isVector() const { |
399 | return (isScalable() && getKnownMinValue() != 0) || getKnownMinValue() > 1; |
400 | } |
401 | ///@} |
402 | }; |
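
// Usage sketch (illustrative, not part of the original header):
//   ElementCount::getFixed(1).isScalar()    == true
//   ElementCount::getFixed(4).isVector()    == true
//   ElementCount::getScalable(1).isVector() == true  // vscale x 1 may exceed 1
//   ElementCount::getScalable(1).isScalar() == false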
403 | |
404 | // TypeSize is used to represent the size of types; see the full comment below.
405 | class TypeSize; |
406 | template <> struct LinearPolyBaseTypeTraits<TypeSize> { |
407 | using ScalarTy = uint64_t; |
408 | static constexpr unsigned Dimensions = 2; |
409 | }; |
410 | |
411 | // TODO: Most functionality in this class will gradually be phased out |
412 | // so it will resemble LinearPolySize as much as possible. |
413 | // |
414 | // TypeSize is used to represent the size of types. If the type is of fixed |
415 | // size, it will represent the exact size. If the type is a scalable vector, |
416 | // it will represent the known minimum size. |
417 | class TypeSize : public LinearPolySize<TypeSize> { |
418 | public: |
419 | TypeSize(const LinearPolySize<TypeSize> &V) : LinearPolySize(V) {} |
420 | TypeSize(ScalarTy MinVal, bool IsScalable) |
421 | : LinearPolySize(LinearPolySize::get(MinVal, IsScalable)) {} |
422 | |
423 | static TypeSize Fixed(ScalarTy MinVal) { return TypeSize(MinVal, false); } |
424 | static TypeSize Scalable(ScalarTy MinVal) { return TypeSize(MinVal, true); } |
425 | |
426 | ScalarTy getFixedSize() const { return getFixedValue(); } |
427 | ScalarTy getKnownMinSize() const { return getKnownMinValue(); } |
428 | |
429 | // All code for this class below this point is needed because of the |
430 | // temporary implicit conversion to uint64_t. The operator overloads are |
431 | // needed because otherwise the conversion of the parent class |
432 | // UnivariateLinearPolyBase -> TypeSize is ambiguous. |
433 | // TODO: Remove the implicit conversion. |
434 | |
435 | // Casts to a uint64_t if this is a fixed-width size. |
436 | // |
437 | // This interface is deprecated and will be removed in a future version |
438 | // of LLVM in favour of upgrading uses that rely on this implicit conversion |
439 | // to uint64_t. Calls to functions that return a TypeSize should use the |
440 | // proper interfaces to TypeSize. |
441 | // In practice this is mostly calls to MVT/EVT::getSizeInBits(). |
442 | // |
443 | // To determine how to upgrade the code: |
444 | // |
445 | // if (<algorithm works for both scalable and fixed-width vectors>) |
446 | // use getKnownMinValue() |
447 | // else if (<algorithm works only for fixed-width vectors>) { |
448 | // if <algorithm can be adapted for both scalable and fixed-width vectors> |
449 | // update the algorithm and use getKnownMinValue() |
450 | // else |
451 | // bail out early for scalable vectors and use getFixedValue() |
452 | // } |
453 | operator ScalarTy() const; |
454 | |
455 | // Additional operators needed to avoid ambiguous parses |
456 | // because of the implicit conversion hack. |
457 | friend TypeSize operator*(const TypeSize &LHS, const int RHS) { |
458 | return LHS * (ScalarTy)RHS; |
459 | } |
460 | friend TypeSize operator*(const TypeSize &LHS, const unsigned RHS) { |
461 | return LHS * (ScalarTy)RHS; |
462 | } |
463 | friend TypeSize operator*(const TypeSize &LHS, const int64_t RHS) { |
464 | return LHS * (ScalarTy)RHS; |
465 | } |
466 | friend TypeSize operator*(const int LHS, const TypeSize &RHS) { |
467 | return RHS * LHS; |
468 | } |
469 | friend TypeSize operator*(const unsigned LHS, const TypeSize &RHS) { |
470 | return RHS * LHS; |
471 | } |
472 | friend TypeSize operator*(const int64_t LHS, const TypeSize &RHS) { |
473 | return RHS * LHS; |
474 | } |
475 | friend TypeSize operator*(const uint64_t LHS, const TypeSize &RHS) { |
476 | return RHS * LHS; |
477 | } |
478 | }; |
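
// Usage sketch (illustrative, not part of the original header):
//
//   TypeSize S1 = TypeSize::Fixed(32);     // exactly 32 bits
//   TypeSize S2 = TypeSize::Scalable(32);  // vscale x 32 bits, vscale >= 1
//   S1.getFixedSize();                     // 32
//   S2.getKnownMinSize();                  // 32; S2.getFixedSize() would assert
//
// Per the upgrade guidance above, code that only handles fixed-width types
// should bail out early when isScalable() is true and only then call
// getFixedValue().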
479 | |
480 | //===----------------------------------------------------------------------===// |
481 | // Utilities |
482 | //===----------------------------------------------------------------------===// |
483 | |
484 | /// Returns a TypeSize with a known minimum size that is the next integer |
485 | /// (mod 2**64) that is greater than or equal to \p Value and is a multiple |
486 | /// of \p Align. \p Align must be non-zero. |
487 | /// |
488 | /// Similar to the alignTo functions in MathExtras.h |
489 | inline TypeSize alignTo(TypeSize Size, uint64_t Align) { |
490 | assert(Align != 0u && "Align must be non-zero");
491 | return {(Size.getKnownMinValue() + Align - 1) / Align * Align, |
492 | Size.isScalable()}; |
493 | } |
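
// Worked example (illustrative, not part of the original header): alignment is
// applied to the known minimum size and the scalable flag is preserved:
//   alignTo(TypeSize::Fixed(17), 8)    == TypeSize::Fixed(24)
//   alignTo(TypeSize::Scalable(17), 8) == TypeSize::Scalable(24) // vscale x 24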
494 | |
495 | /// Stream operator function for `LinearPolySize`. |
496 | template <typename LeafTy> |
497 | inline raw_ostream &operator<<(raw_ostream &OS, |
498 | const LinearPolySize<LeafTy> &PS) { |
499 | PS.print(OS); |
500 | return OS; |
501 | } |
502 | |
503 | template <typename T> struct DenseMapInfo; |
504 | template <> struct DenseMapInfo<ElementCount> { |
505 | static inline ElementCount getEmptyKey() { |
506 | return ElementCount::getScalable(~0U); |
507 | } |
508 | static inline ElementCount getTombstoneKey() { |
509 | return ElementCount::getFixed(~0U - 1); |
510 | } |
511 | static unsigned getHashValue(const ElementCount &EltCnt) { |
512 | unsigned HashVal = EltCnt.getKnownMinValue() * 37U; |
513 | if (EltCnt.isScalable()) |
514 | return (HashVal - 1U); |
515 | |
516 | return HashVal; |
517 | } |
518 | |
519 | static bool isEqual(const ElementCount &LHS, const ElementCount &RHS) { |
520 | return LHS == RHS; |
521 | } |
522 | }; |
523 | |
524 | } // end namespace llvm |
525 | |
526 | #endif // LLVM_SUPPORT_TYPESIZE_H |