Bug Summary

File: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Warning: line 9215, column 34
Use of memory after it is freed

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name LoopVectorize.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Vectorize -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Transforms/Vectorize -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/Proposal/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
56#include "llvm/Transforms/Vectorize/LoopVectorize.h"
57#include "LoopVectorizationPlanner.h"
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanHCFGBuilder.h"
61#include "VPlanPredicator.h"
62#include "VPlanTransforms.h"
63#include "llvm/ADT/APInt.h"
64#include "llvm/ADT/ArrayRef.h"
65#include "llvm/ADT/DenseMap.h"
66#include "llvm/ADT/DenseMapInfo.h"
67#include "llvm/ADT/Hashing.h"
68#include "llvm/ADT/MapVector.h"
69#include "llvm/ADT/None.h"
70#include "llvm/ADT/Optional.h"
71#include "llvm/ADT/STLExtras.h"
72#include "llvm/ADT/SmallPtrSet.h"
73#include "llvm/ADT/SmallSet.h"
74#include "llvm/ADT/SmallVector.h"
75#include "llvm/ADT/Statistic.h"
76#include "llvm/ADT/StringRef.h"
77#include "llvm/ADT/Twine.h"
78#include "llvm/ADT/iterator_range.h"
79#include "llvm/Analysis/AssumptionCache.h"
80#include "llvm/Analysis/BasicAliasAnalysis.h"
81#include "llvm/Analysis/BlockFrequencyInfo.h"
82#include "llvm/Analysis/CFG.h"
83#include "llvm/Analysis/CodeMetrics.h"
84#include "llvm/Analysis/DemandedBits.h"
85#include "llvm/Analysis/GlobalsModRef.h"
86#include "llvm/Analysis/LoopAccessAnalysis.h"
87#include "llvm/Analysis/LoopAnalysisManager.h"
88#include "llvm/Analysis/LoopInfo.h"
89#include "llvm/Analysis/LoopIterator.h"
90#include "llvm/Analysis/OptimizationRemarkEmitter.h"
91#include "llvm/Analysis/ProfileSummaryInfo.h"
92#include "llvm/Analysis/ScalarEvolution.h"
93#include "llvm/Analysis/ScalarEvolutionExpressions.h"
94#include "llvm/Analysis/TargetLibraryInfo.h"
95#include "llvm/Analysis/TargetTransformInfo.h"
96#include "llvm/Analysis/VectorUtils.h"
97#include "llvm/IR/Attributes.h"
98#include "llvm/IR/BasicBlock.h"
99#include "llvm/IR/CFG.h"
100#include "llvm/IR/Constant.h"
101#include "llvm/IR/Constants.h"
102#include "llvm/IR/DataLayout.h"
103#include "llvm/IR/DebugInfoMetadata.h"
104#include "llvm/IR/DebugLoc.h"
105#include "llvm/IR/DerivedTypes.h"
106#include "llvm/IR/DiagnosticInfo.h"
107#include "llvm/IR/Dominators.h"
108#include "llvm/IR/Function.h"
109#include "llvm/IR/IRBuilder.h"
110#include "llvm/IR/InstrTypes.h"
111#include "llvm/IR/Instruction.h"
112#include "llvm/IR/Instructions.h"
113#include "llvm/IR/IntrinsicInst.h"
114#include "llvm/IR/Intrinsics.h"
115#include "llvm/IR/LLVMContext.h"
116#include "llvm/IR/Metadata.h"
117#include "llvm/IR/Module.h"
118#include "llvm/IR/Operator.h"
119#include "llvm/IR/PatternMatch.h"
120#include "llvm/IR/Type.h"
121#include "llvm/IR/Use.h"
122#include "llvm/IR/User.h"
123#include "llvm/IR/Value.h"
124#include "llvm/IR/ValueHandle.h"
125#include "llvm/IR/Verifier.h"
126#include "llvm/InitializePasses.h"
127#include "llvm/Pass.h"
128#include "llvm/Support/Casting.h"
129#include "llvm/Support/CommandLine.h"
130#include "llvm/Support/Compiler.h"
131#include "llvm/Support/Debug.h"
132#include "llvm/Support/ErrorHandling.h"
133#include "llvm/Support/InstructionCost.h"
134#include "llvm/Support/MathExtras.h"
135#include "llvm/Support/raw_ostream.h"
136#include "llvm/Transforms/Utils/BasicBlockUtils.h"
137#include "llvm/Transforms/Utils/InjectTLIMappings.h"
138#include "llvm/Transforms/Utils/LoopSimplify.h"
139#include "llvm/Transforms/Utils/LoopUtils.h"
140#include "llvm/Transforms/Utils/LoopVersioning.h"
141#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142#include "llvm/Transforms/Utils/SizeOpts.h"
143#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144#include <algorithm>
145#include <cassert>
146#include <cstdint>
147#include <cstdlib>
148#include <functional>
149#include <iterator>
150#include <limits>
151#include <memory>
152#include <string>
153#include <tuple>
154#include <utility>
155
156using namespace llvm;
157
158#define LV_NAME"loop-vectorize" "loop-vectorize"
159#define DEBUG_TYPE"loop-vectorize" LV_NAME"loop-vectorize"
160
161#ifndef NDEBUG
162const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163#endif
164
165/// @{
166/// Metadata attribute names
167const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168const char LLVMLoopVectorizeFollowupVectorized[] =
169 "llvm.loop.vectorize.followup_vectorized";
170const char LLVMLoopVectorizeFollowupEpilogue[] =
171 "llvm.loop.vectorize.followup_epilogue";
172/// @}
173
174STATISTIC(LoopsVectorized, "Number of loops vectorized");
175STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177
178static cl::opt<bool> EnableEpilogueVectorization(
179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180 cl::desc("Enable vectorization of epilogue loops."));
181
182static cl::opt<unsigned> EpilogueVectorizationForceVF(
183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184 cl::desc("When epilogue vectorization is enabled, and a value greater than "
185 "1 is specified, forces the given VF for all applicable epilogue "
186 "loops."));
187
188static cl::opt<unsigned> EpilogueVectorizationMinVF(
189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190 cl::desc("Only loops with vectorization factor equal to or larger than "
191 "the specified value are considered for epilogue vectorization."));
192
193/// Loops with a known constant trip count below this number are vectorized only
194/// if no scalar iteration overheads are incurred.
195static cl::opt<unsigned> TinyTripCountVectorThreshold(
196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197 cl::desc("Loops with a constant trip count that is smaller than this "
198 "value are vectorized only if no scalar iteration overheads "
199 "are incurred."));
200
201static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203 cl::desc("The maximum allowed number of runtime memory checks with a "
204 "vectorize(enable) pragma."));
205
206// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired
207// and that predication is preferred; the values below list the choices. I.e., the
208// vectorizer will try to fold the tail loop (epilogue) into the vector body
209// and predicate the instructions accordingly. If tail-folding fails, there are
210// different fallback strategies depending on these values:
211namespace PreferPredicateTy {
212 enum Option {
213 ScalarEpilogue = 0,
214 PredicateElseScalarEpilogue,
215 PredicateOrDontVectorize
216 };
217} // namespace PreferPredicateTy
218
219static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220 "prefer-predicate-over-epilogue",
221 cl::init(PreferPredicateTy::ScalarEpilogue),
222 cl::Hidden,
223 cl::desc("Tail-folding and predication preferences over creating a scalar "
224 "epilogue loop."),
225    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
226                          "scalar-epilogue",
227                          "Don't tail-predicate loops, create scalar epilogue"),
228               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
229                          "predicate-else-scalar-epilogue",
230                          "prefer tail-folding, create scalar epilogue if tail "
231                          "folding fails."),
232               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
233                          "predicate-dont-vectorize",
234                          "prefers tail-folding, don't attempt vectorization if "
235                          "tail-folding fails.")));
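
A hedged sketch of the fallback dispatch described in the comment above: the enum values are the real ones, but the helper function and its use are illustrative, not code from this file.

  // Sketch only: decide whether a scalar epilogue remains an option when
  // tail-folding is requested but fails.
  static bool allowScalarEpilogueFallback() {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return true;  // no tail-predication; always create a scalar epilogue
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return true;  // try tail-folding first, fall back to a scalar epilogue
    case PreferPredicateTy::PredicateOrDontVectorize:
      return false; // if tail-folding fails, don't vectorize at all
    }
    llvm_unreachable("unknown PreferPredicateTy option");
  }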
236
237static cl::opt<bool> MaximizeBandwidth(
238 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239 cl::desc("Maximize bandwidth when selecting vectorization factor which "
240 "will be determined by the smallest type in loop."));
241
242static cl::opt<bool> EnableInterleavedMemAccesses(
243 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245
246/// An interleave-group may need masking if it resides in a block that needs
247/// predication, or in order to mask away gaps.
248static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
250 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
251
252static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253 "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
254 cl::desc("We don't interleave loops with a estimated constant trip count "
255 "below this number"));
256
257static cl::opt<unsigned> ForceTargetNumScalarRegs(
258 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259 cl::desc("A flag that overrides the target's number of scalar registers."));
260
261static cl::opt<unsigned> ForceTargetNumVectorRegs(
262 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263 cl::desc("A flag that overrides the target's number of vector registers."));
264
265static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267 cl::desc("A flag that overrides the target's max interleave factor for "
268 "scalar loops."));
269
270static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272 cl::desc("A flag that overrides the target's max interleave factor for "
273 "vectorized loops."));
274
275static cl::opt<unsigned> ForceTargetInstructionCost(
276 "force-target-instruction-cost", cl::init(0), cl::Hidden,
277 cl::desc("A flag that overrides the target's expected cost for "
278 "an instruction to a single constant value. Mostly "
279 "useful for getting consistent testing."));
280
281static cl::opt<bool> ForceTargetSupportsScalableVectors(
282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283 cl::desc(
284 "Pretend that scalable vectors are supported, even if the target does "
285 "not support them. This flag should only be used for testing."));
286
287static cl::opt<unsigned> SmallLoopCost(
288 "small-loop-cost", cl::init(20), cl::Hidden,
289 cl::desc(
290 "The cost of a loop that is considered 'small' by the interleaver."));
291
292static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294 cl::desc("Enable the use of the block frequency analysis to access PGO "
295 "heuristics minimizing code growth in cold regions and being more "
296 "aggressive in hot regions."));
297
298// Runtime interleave loops for load/store throughput.
299static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301 cl::desc(
302 "Enable runtime interleaving until load/store ports are saturated"));
303
304/// Interleave small loops with scalar reductions.
305static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307 cl::desc("Enable interleaving for loops with small iteration counts that "
308 "contain scalar reductions to expose ILP."));
309
310/// The number of stores in a loop that are allowed to need predication.
311static cl::opt<unsigned> NumberOfStoresToPredicate(
312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313 cl::desc("Max number of stores to be predicated behind an if."));
314
315static cl::opt<bool> EnableIndVarRegisterHeur(
316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317 cl::desc("Count the induction variable only once when interleaving"));
318
319static cl::opt<bool> EnableCondStoresVectorization(
320 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
321 cl::desc("Enable if predication of stores during vectorization."));
322
323static cl::opt<unsigned> MaxNestedScalarReductionIC(
324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325 cl::desc("The maximum interleave count to use when interleaving a scalar "
326 "reduction in a nested loop."));
327
328static cl::opt<bool>
329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330 cl::Hidden,
331 cl::desc("Prefer in-loop vector reductions, "
332 "overriding the targets preference."));
333
334static cl::opt<bool> ForceOrderedReductions(
335 "force-ordered-reductions", cl::init(false), cl::Hidden,
336 cl::desc("Enable the vectorisation of loops with in-order (strict) "
337 "FP reductions"));
338
339static cl::opt<bool> PreferPredicatedReductionSelect(
340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341 cl::desc(
342 "Prefer predicating a reduction operation over an after loop select."));
343
344cl::opt<bool> EnableVPlanNativePath(
345 "enable-vplan-native-path", cl::init(false), cl::Hidden,
346 cl::desc("Enable VPlan-native vectorization path with "
347 "support for outer loop vectorization."));
348
349// FIXME: Remove this switch once we have divergence analysis. Currently we
350// assume divergent non-backedge branches when this switch is true.
351cl::opt<bool> EnableVPlanPredication(
352 "enable-vplan-predication", cl::init(false), cl::Hidden,
353 cl::desc("Enable VPlan-native vectorization path predicator with "
354 "support for outer loop vectorization."));
355
356// This flag enables the stress testing of the VPlan H-CFG construction in the
357// VPlan-native vectorization path. It must be used in conjunction with
358// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359// verification of the H-CFGs built.
360static cl::opt<bool> VPlanBuildStressTest(
361 "vplan-build-stress-test", cl::init(false), cl::Hidden,
362 cl::desc(
363 "Build VPlan for every supported loop nest in the function and bail "
364 "out right after the build (stress test the VPlan H-CFG construction "
365 "in the VPlan-native vectorization path)."));
366
367cl::opt<bool> llvm::EnableLoopInterleaving(
368 "interleave-loops", cl::init(true), cl::Hidden,
369 cl::desc("Enable loop interleaving in Loop vectorization passes"));
370cl::opt<bool> llvm::EnableLoopVectorization(
371 "vectorize-loops", cl::init(true), cl::Hidden,
372 cl::desc("Run the Loop vectorization passes"));
373
374cl::opt<bool> PrintVPlansInDotFormat(
375 "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376 cl::desc("Use dot format instead of plain text when dumping VPlans"));
377
378/// A helper function that returns true if the given type is irregular. The
379/// type is irregular if its allocated size doesn't equal the store size of an
380/// element of the corresponding vector type.
381static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382 // Determine if an array of N elements of type Ty is "bitcast compatible"
383 // with a <N x Ty> vector.
384 // This is only true if there is no padding between the array elements.
385 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386}
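
As an illustration of the check above, here is a minimal, self-contained sketch (not part of this file; the data layout string is an assumption): i24 carries 24 bits of value but is allocated in 32 bits, so it is irregular, while i32 is regular.

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"

  static void irregularTypeExample() {
    llvm::LLVMContext Ctx;
    llvm::DataLayout DL("e-i32:32"); // illustrative layout string
    llvm::Type *I24 = llvm::Type::getIntNTy(Ctx, 24);
    llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
    // 32 allocated bits != 24 value bits: an array of i24 has padding, so
    // it is not bitcast-compatible with <N x i24>.
    bool Irregular = DL.getTypeAllocSizeInBits(I24) != DL.getTypeSizeInBits(I24);
    // 32 == 32: no padding, bitcast-compatible with <N x i32>.
    bool Regular = DL.getTypeAllocSizeInBits(I32) == DL.getTypeSizeInBits(I32);
    (void)Irregular;
    (void)Regular;
  }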
387
388/// A helper function that returns the reciprocal of the block probability of
389/// predicated blocks. If we return X, we are assuming the predicated block
390/// will execute once for every X iterations of the loop header.
391///
392/// TODO: We should use actual block probability here, if available. Currently,
393/// we always assume predicated blocks have a 50% chance of executing.
394static unsigned getReciprocalPredBlockProb() { return 2; }
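
For example, a block predicated under the 50% assumption above contributes half of its raw cost per loop-header iteration. A hypothetical helper (not from this file) would discount it like so:

  // Sketch only: scale a predicated block's cost by its execution
  // probability, e.g. a raw cost of 10 becomes 5.
  static unsigned discountPredicatedBlockCost(unsigned RawBlockCost) {
    return RawBlockCost / getReciprocalPredBlockProb();
  }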
395
396/// A helper function that returns an integer or floating-point constant with
397/// value C.
398static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400 : ConstantFP::get(Ty, C);
401}
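
A hedged usage example (Ctx is an assumed LLVMContext, not a name from this section): for a 32-bit integer type and C = -1 this returns the signed ConstantInt -1; for float it returns the ConstantFP -1.0.

  Constant *CI = getSignedIntOrFpConstant(Type::getInt32Ty(Ctx), -1);
  Constant *CF = getSignedIntOrFpConstant(Type::getFloatTy(Ctx), -1);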
402
403/// Returns "best known" trip count for the specified loop \p L as defined by
404/// the following procedure:
405/// 1) Returns exact trip count if it is known.
406/// 2) Returns expected trip count according to profile data if any.
407/// 3) Returns upper bound estimate if it is known.
408/// 4) Returns None if all of the above failed.
409static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410 // Check if exact trip count is known.
411 if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412 return ExpectedTC;
413
414 // Check if there is an expected trip count available from profile data.
415 if (LoopVectorizeWithBlockFrequency)
416 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417 return EstimatedTC;
418
419 // Check if upper bound estimate is known.
420 if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421 return ExpectedTC;
422
423 return None;
424}
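
A hedged usage sketch (the wrapper is hypothetical; the threshold is the -vectorizer-min-trip-count option defined earlier): callers typically compare the best-known trip count against a threshold and treat the loop as "tiny" when it falls below it.

  static bool hasTinyTripCount(ScalarEvolution &SE, Loop *L) {
    Optional<unsigned> ExpectedTC = getSmallBestKnownTC(SE, L);
    return ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold;
  }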
425
426// Forward declare GeneratedRTChecks.
427class GeneratedRTChecks;
428
429namespace llvm {
430
431AnalysisKey ShouldRunExtraVectorPasses::Key;
432
433/// InnerLoopVectorizer vectorizes loops which contain only one basic
434/// block to a specified vectorization factor (VF).
435/// This class performs the widening of scalars into vectors, or multiple
436/// scalars. This class also implements the following features:
437/// * It inserts an epilogue loop for handling loops that don't have iteration
438/// counts that are known to be a multiple of the vectorization factor.
439/// * It handles the code generation for reduction variables.
440/// * Scalarization (implementation using scalars) of un-vectorizable
441/// instructions.
442/// InnerLoopVectorizer does not perform any vectorization-legality
443/// checks, and relies on the caller to check for the different legality
444/// aspects. The InnerLoopVectorizer relies on the
445/// LoopVectorizationLegality class to provide information about the induction
446/// and reduction variables that were found to a given vectorization factor.
447class InnerLoopVectorizer {
448public:
449 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
450 LoopInfo *LI, DominatorTree *DT,
451 const TargetLibraryInfo *TLI,
452 const TargetTransformInfo *TTI, AssumptionCache *AC,
453 OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
454 unsigned UnrollFactor, LoopVectorizationLegality *LVL,
455 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
456 ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
457 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
458 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
459 Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
460 PSI(PSI), RTChecks(RTChecks) {
461 // Query this against the original loop and save it here because the profile
462 // of the original loop header may change as the transformation happens.
463 OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
464 OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
465 }
466
467 virtual ~InnerLoopVectorizer() = default;
468
469 /// Create a new empty loop that will contain vectorized instructions later
470 /// on, while the old loop will be used as the scalar remainder. Control flow
471 /// is generated around the vectorized (and scalar epilogue) loops consisting
472 /// of various checks and bypasses. Return the pre-header block of the new
473 /// loop and the start value for the canonical induction, if it is != 0. The
474 /// latter is the case when vectorizing the epilogue loop. In the case of
475/// epilogue vectorization, this function is overridden to handle the more
476 /// complex control flow around the loops.
477 virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
478
479 /// Widen a single call instruction within the innermost loop.
480 void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
481 VPTransformState &State);
482
483 /// Fix the vectorized code, taking care of header phis, live-outs, and more.
484 void fixVectorizedLoop(VPTransformState &State);
485
486 // Return true if any runtime check is added.
487 bool areSafetyChecksAdded() { return AddedSafetyChecks; }
488
489 /// A type for vectorized values in the new loop. Each value from the
490 /// original loop, when vectorized, is represented by UF vector values in the
491 /// new unrolled loop, where UF is the unroll factor.
492 using VectorParts = SmallVector<Value *, 2>;
493
494 /// Vectorize a single first-order recurrence or pointer induction PHINode in
495 /// a block. This method handles the induction variable canonicalization. It
496 /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
497 void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
498 VPTransformState &State);
499
500 /// A helper function to scalarize a single Instruction in the innermost loop.
501 /// Generates a sequence of scalar instances for each lane between \p MinLane
502 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
503 /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
504 /// Instr's operands.
505 void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
506 const VPIteration &Instance, bool IfPredicateInstr,
507 VPTransformState &State);
508
509 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
510 /// is provided, the integer induction variable will first be truncated to
511 /// the corresponding type. \p CanonicalIV is the scalar value generated for
512 /// the canonical induction variable.
513 void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
514 Value *Start, TruncInst *Trunc, VPValue *Def,
515 VPTransformState &State, Value *CanonicalIV);
516
517 /// Construct the vector value of a scalarized value \p V one lane at a time.
518 void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
519 VPTransformState &State);
520
521 /// Try to vectorize interleaved access group \p Group with the base address
522 /// given in \p Addr, optionally masking the vector operations if \p
523 /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
524 /// values in the vectorized loop.
525 void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
526 ArrayRef<VPValue *> VPDefs,
527 VPTransformState &State, VPValue *Addr,
528 ArrayRef<VPValue *> StoredValues,
529 VPValue *BlockInMask = nullptr);
530
531 /// Set the debug location in the builder \p Ptr using the debug location in
532 /// \p V. If \p Ptr is None then it uses the class member's Builder.
533 void setDebugLocFromInst(const Value *V,
534 Optional<IRBuilder<> *> CustomBuilder = None);
535
536 /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
537 void fixNonInductionPHIs(VPTransformState &State);
538
539 /// Returns true if the reordering of FP operations is not allowed, but we are
540 /// able to vectorize with strict in-order reductions for the given RdxDesc.
541 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
542
543 /// Create a broadcast instruction. This method generates a broadcast
544 /// instruction (shuffle) for loop invariant values and for the induction
545 /// value. If this is the induction variable then we extend it to N, N+1, ...
546 /// this is needed because each iteration in the loop corresponds to a SIMD
547 /// element.
548 virtual Value *getBroadcastInstrs(Value *V);
549
550 /// Add metadata from one instruction to another.
551 ///
552 /// This includes both the original MDs from \p From and additional ones (\see
553 /// addNewMetadata). Use this for *newly created* instructions in the vector
554 /// loop.
555 void addMetadata(Instruction *To, Instruction *From);
556
557 /// Similar to the previous function but it adds the metadata to a
558 /// vector of instructions.
559 void addMetadata(ArrayRef<Value *> To, Instruction *From);
560
561protected:
562 friend class LoopVectorizationPlanner;
563
564 /// A small list of PHINodes.
565 using PhiVector = SmallVector<PHINode *, 4>;
566
567 /// A type for scalarized values in the new loop. Each value from the
568 /// original loop, when scalarized, is represented by UF x VF scalar values
569 /// in the new unrolled loop, where UF is the unroll factor and VF is the
570 /// vectorization factor.
571 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
572
573 /// Set up the values of the IVs correctly when exiting the vector loop.
574 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
575 Value *CountRoundDown, Value *EndValue,
576 BasicBlock *MiddleBlock);
577
578 /// Introduce a conditional branch (on true, condition to be set later) at the
579 /// end of the header=latch connecting it to itself (across the backedge) and
580 /// to the exit block of \p L.
581 void createHeaderBranch(Loop *L);
582
583 /// Handle all cross-iteration phis in the header.
584 void fixCrossIterationPHIs(VPTransformState &State);
585
586 /// Create the exit value of first order recurrences in the middle block and
587 /// update their users.
588 void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
589 VPTransformState &State);
590
591 /// Create code for the loop exit value of the reduction.
592 void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
593
594 /// Clear NSW/NUW flags from reduction instructions if necessary.
595 void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
596 VPTransformState &State);
597
598 /// Fixup the LCSSA phi nodes in the unique exit block. This simply
599 /// means we need to add the appropriate incoming value from the middle
600 /// block as exiting edges from the scalar epilogue loop (if present) are
601 /// already in place, and we exit the vector loop exclusively to the middle
602 /// block.
603 void fixLCSSAPHIs(VPTransformState &State);
604
605 /// Iteratively sink the scalarized operands of a predicated instruction into
606 /// the block that was created for it.
607 void sinkScalarOperands(Instruction *PredInst);
608
609 /// Shrinks vector element sizes to the smallest bitwidth they can be legally
610 /// represented as.
611 void truncateToMinimalBitwidths(VPTransformState &State);
612
613 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
614 /// variable on which to base the steps, \p Step is the size of the step, and
615 /// \p EntryVal is the value from the original loop that maps to the steps.
616 /// Note that \p EntryVal doesn't have to be an induction variable - it
617 /// can also be a truncate instruction.
618 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
619 const InductionDescriptor &ID, VPValue *Def,
620 VPTransformState &State);
621
622 /// Create a vector induction phi node based on an existing scalar one. \p
623 /// EntryVal is the value from the original loop that maps to the vector phi
624 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
625 /// truncate instruction, instead of widening the original IV, we widen a
626 /// version of the IV truncated to \p EntryVal's type.
627 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
628 Value *Step, Value *Start,
629 Instruction *EntryVal, VPValue *Def,
630 VPTransformState &State);
631
632 /// Returns true if an instruction \p I should be scalarized instead of
633 /// vectorized for the chosen vectorization factor.
634 bool shouldScalarizeInstruction(Instruction *I) const;
635
636 /// Returns true if we should generate a scalar version of \p IV.
637 bool needsScalarInduction(Instruction *IV) const;
638
639 /// Returns (and creates if needed) the original loop trip count.
640 Value *getOrCreateTripCount(Loop *NewLoop);
641
642 /// Returns (and creates if needed) the trip count of the widened loop.
643 Value *getOrCreateVectorTripCount(Loop *NewLoop);
644
645 /// Returns a bitcasted value to the requested vector type.
646 /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
647 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
648 const DataLayout &DL);
649
650 /// Emit a bypass check to see if the vector trip count is zero, including if
651 /// it overflows.
652 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
653
654 /// Emit a bypass check to see if all of the SCEV assumptions we've
655 /// had to make are correct. Returns the block containing the checks or
656 /// nullptr if no checks have been added.
657 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
658
659 /// Emit bypass checks to check any memory assumptions we may have made.
660 /// Returns the block containing the checks or nullptr if no checks have been
661 /// added.
662 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
663
664 /// Compute the transformed value of Index at offset StartValue using step
665 /// StepValue.
666 /// For integer induction, returns StartValue + Index * StepValue.
667 /// For pointer induction, returns StartValue[Index * StepValue].
668 /// FIXME: The newly created binary instructions should contain nsw/nuw
669 /// flags, which can be found from the original scalar operations.
670 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
671 const DataLayout &DL,
672 const InductionDescriptor &ID,
673 BasicBlock *VectorHeader) const;
674
675 /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
676 /// vector loop preheader, middle block and scalar preheader. Also
677 /// allocate a loop object for the new vector loop and return it.
678 Loop *createVectorLoopSkeleton(StringRef Prefix);
679
680 /// Create new phi nodes for the induction variables to resume iteration count
681 /// in the scalar epilogue, from where the vectorized loop left off.
682 /// In cases where the loop skeleton is more complicated (eg. epilogue
683 /// vectorization) and the resume values can come from an additional bypass
684 /// block, the \p AdditionalBypass pair provides information about the bypass
685 /// block and the end value on the edge from bypass to this loop.
686 void createInductionResumeValues(
687 Loop *L,
688 std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
689
690 /// Complete the loop skeleton by adding debug MDs, creating appropriate
691 /// conditional branches in the middle block, preparing the builder and
692 /// running the verifier. Take in the vector loop \p L as argument, and return
693 /// the preheader of the completed vector loop.
694 BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
695
696 /// Add additional metadata to \p To that was not present on \p Orig.
697 ///
698 /// Currently this is used to add the noalias annotations based on the
699 /// inserted memchecks. Use this for instructions that are *cloned* into the
700 /// vector loop.
701 void addNewMetadata(Instruction *To, const Instruction *Orig);
702
703 /// Collect poison-generating recipes that may generate a poison value that is
704 /// used after vectorization, even when their operands are not poison. Those
705 /// recipes meet the following conditions:
706 /// * Contribute to the address computation of a recipe generating a widen
707 /// memory load/store (VPWidenMemoryInstructionRecipe or
708 /// VPInterleaveRecipe).
709 /// * Such a widen memory load/store has at least one underlying Instruction
710 /// that is in a basic block that needs predication and after vectorization
711 /// the generated instruction won't be predicated.
712 void collectPoisonGeneratingRecipes(VPTransformState &State);
713
714 /// Allow subclasses to override and print debug traces before/after vplan
715 /// execution, when trace information is requested.
716 virtual void printDebugTracesAtStart(){};
717 virtual void printDebugTracesAtEnd(){};
718
719 /// The original loop.
720 Loop *OrigLoop;
721
722 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
723 /// dynamic knowledge to simplify SCEV expressions and converts them to a
724 /// more usable form.
725 PredicatedScalarEvolution &PSE;
726
727 /// Loop Info.
728 LoopInfo *LI;
729
730 /// Dominator Tree.
731 DominatorTree *DT;
732
733 /// Alias Analysis.
734 AAResults *AA;
735
736 /// Target Library Info.
737 const TargetLibraryInfo *TLI;
738
739 /// Target Transform Info.
740 const TargetTransformInfo *TTI;
741
742 /// Assumption Cache.
743 AssumptionCache *AC;
744
745 /// Interface to emit optimization remarks.
746 OptimizationRemarkEmitter *ORE;
747
748 /// LoopVersioning. It's only set up (non-null) if memchecks were
749 /// used.
750 ///
751 /// This is currently only used to add no-alias metadata based on the
752 /// memchecks. The actual versioning is performed manually.
753 std::unique_ptr<LoopVersioning> LVer;
754
755 /// The vectorization SIMD factor to use. Each vector will have this many
756 /// vector elements.
757 ElementCount VF;
758
759 /// The vectorization unroll factor to use. Each scalar is vectorized to this
760 /// many different vector instructions.
761 unsigned UF;
762
763 /// The builder that we use
764 IRBuilder<> Builder;
765
766 // --- Vectorization state ---
767
768 /// The vector-loop preheader.
769 BasicBlock *LoopVectorPreHeader;
770
771 /// The scalar-loop preheader.
772 BasicBlock *LoopScalarPreHeader;
773
774 /// Middle Block between the vector and the scalar.
775 BasicBlock *LoopMiddleBlock;
776
777 /// The unique ExitBlock of the scalar loop if one exists. Note that
778 /// there can be multiple exiting edges reaching this block.
779 BasicBlock *LoopExitBlock;
780
781 /// The vector loop body.
782 BasicBlock *LoopVectorBody;
783
784 /// The scalar loop body.
785 BasicBlock *LoopScalarBody;
786
787 /// A list of all bypass blocks. The first block is the entry of the loop.
788 SmallVector<BasicBlock *, 4> LoopBypassBlocks;
789
790 /// Store instructions that were predicated.
791 SmallVector<Instruction *, 4> PredicatedInstructions;
792
793 /// Trip count of the original loop.
794 Value *TripCount = nullptr;
795
796 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
797 Value *VectorTripCount = nullptr;
798
799 /// The legality analysis.
800 LoopVectorizationLegality *Legal;
801
802 /// The profitability analysis.
803 LoopVectorizationCostModel *Cost;
804
805 // Record whether runtime checks are added.
806 bool AddedSafetyChecks = false;
807
808 // Holds the end values for each induction variable. We save the end values
809 // so we can later fix-up the external users of the induction variables.
810 DenseMap<PHINode *, Value *> IVEndValues;
811
812 // Vector of original scalar PHIs whose corresponding widened PHIs need to be
813 // fixed up at the end of vector code generation.
814 SmallVector<PHINode *, 8> OrigPHIsToFix;
815
816 /// BFI and PSI are used to check for profile guided size optimizations.
817 BlockFrequencyInfo *BFI;
818 ProfileSummaryInfo *PSI;
819
820 // Whether this loop should be optimized for size based on profile guided size
821 // optimizations.
822 bool OptForSizeBasedOnProfile;
823
824 /// Structure to hold information about generated runtime checks, responsible
825 /// for cleaning the checks, if vectorization turns out unprofitable.
826 GeneratedRTChecks &RTChecks;
827};
828
829class InnerLoopUnroller : public InnerLoopVectorizer {
830public:
831 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
832 LoopInfo *LI, DominatorTree *DT,
833 const TargetLibraryInfo *TLI,
834 const TargetTransformInfo *TTI, AssumptionCache *AC,
835 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
836 LoopVectorizationLegality *LVL,
837 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
838 ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
839 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
840 ElementCount::getFixed(1), UnrollFactor, LVL, CM,
841 BFI, PSI, Check) {}
842
843private:
844 Value *getBroadcastInstrs(Value *V) override;
845};
846
847/// Encapsulate information regarding vectorization of a loop and its epilogue.
848/// This information is meant to be updated and used across two stages of
849/// epilogue vectorization.
850struct EpilogueLoopVectorizationInfo {
851 ElementCount MainLoopVF = ElementCount::getFixed(0);
852 unsigned MainLoopUF = 0;
853 ElementCount EpilogueVF = ElementCount::getFixed(0);
854 unsigned EpilogueUF = 0;
855 BasicBlock *MainLoopIterationCountCheck = nullptr;
856 BasicBlock *EpilogueIterationCountCheck = nullptr;
857 BasicBlock *SCEVSafetyCheck = nullptr;
858 BasicBlock *MemSafetyCheck = nullptr;
859 Value *TripCount = nullptr;
860 Value *VectorTripCount = nullptr;
861
862 EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
863 ElementCount EVF, unsigned EUF)
864 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
865    assert(EUF == 1 &&
866           "A high UF for the epilogue loop is likely not beneficial.");
867 }
868};
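
A hedged construction example (the concrete factors are illustrative only): a main loop vectorized at VF=16 with UF=2 and an epilogue at VF=8; EUF must be 1, per the assert in the constructor.

  EpilogueLoopVectorizationInfo EPI(ElementCount::getFixed(16), /*MUF=*/2,
                                    ElementCount::getFixed(8), /*EUF=*/1);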
869
870/// An extension of the inner loop vectorizer that creates a skeleton for a
871/// vectorized loop that has its epilogue (residual) also vectorized.
872/// The idea is to run the vplan on a given loop twice, firstly to setup the
873/// skeleton and vectorize the main loop, and secondly to complete the skeleton
874/// from the first step and vectorize the epilogue. This is achieved by
875/// deriving two concrete strategy classes from this base class and invoking
876/// them in succession from the loop vectorizer planner.
877class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
878public:
879 InnerLoopAndEpilogueVectorizer(
880 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
881 DominatorTree *DT, const TargetLibraryInfo *TLI,
882 const TargetTransformInfo *TTI, AssumptionCache *AC,
883 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
884 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
885 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
886 GeneratedRTChecks &Checks)
887 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
888 EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
889 Checks),
890 EPI(EPI) {}
891
892 // Override this function to handle the more complex control flow around the
893 // three loops.
894 std::pair<BasicBlock *, Value *>
895 createVectorizedLoopSkeleton() final override {
896 return createEpilogueVectorizedLoopSkeleton();
897 }
898
899 /// The interface for creating a vectorized skeleton using one of two
900 /// different strategies, each corresponding to one execution of the vplan
901 /// as described above.
902 virtual std::pair<BasicBlock *, Value *>
903 createEpilogueVectorizedLoopSkeleton() = 0;
904
905 /// Holds and updates state information required to vectorize the main loop
906 /// and its epilogue in two separate passes. This setup helps us avoid
907 /// regenerating and recomputing runtime safety checks. It also helps us to
908 /// shorten the iteration-count-check path length for the cases where the
909 /// iteration count of the loop is so small that the main vector loop is
910 /// completely skipped.
911 EpilogueLoopVectorizationInfo &EPI;
912};
913
914/// A specialized derived class of inner loop vectorizer that performs
915/// vectorization of *main* loops in the process of vectorizing loops and their
916/// epilogues.
917class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
918public:
919 EpilogueVectorizerMainLoop(
920 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
921 DominatorTree *DT, const TargetLibraryInfo *TLI,
922 const TargetTransformInfo *TTI, AssumptionCache *AC,
923 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
924 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
925 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
926 GeneratedRTChecks &Check)
927 : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
928 EPI, LVL, CM, BFI, PSI, Check) {}
929 /// Implements the interface for creating a vectorized skeleton using the
930 /// *main loop* strategy (ie the first pass of vplan execution).
931 std::pair<BasicBlock *, Value *>
932 createEpilogueVectorizedLoopSkeleton() final override;
933
934protected:
935 /// Emits an iteration count bypass check once for the main loop (when \p
936 /// ForEpilogue is false) and once for the epilogue loop (when \p
937 /// ForEpilogue is true).
938 BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
939 bool ForEpilogue);
940 void printDebugTracesAtStart() override;
941 void printDebugTracesAtEnd() override;
942};
943
944// A specialized derived class of inner loop vectorizer that performs
945// vectorization of *epilogue* loops in the process of vectorizing loops and
946// their epilogues.
947class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
948public:
949 EpilogueVectorizerEpilogueLoop(
950 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
951 DominatorTree *DT, const TargetLibraryInfo *TLI,
952 const TargetTransformInfo *TTI, AssumptionCache *AC,
953 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
954 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
955 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
956 GeneratedRTChecks &Checks)
957 : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
958 EPI, LVL, CM, BFI, PSI, Checks) {}
959 /// Implements the interface for creating a vectorized skeleton using the
960 /// *epilogue loop* strategy (ie the second pass of vplan execution).
961 std::pair<BasicBlock *, Value *>
962 createEpilogueVectorizedLoopSkeleton() final override;
963
964protected:
965 /// Emits an iteration count bypass check after the main vector loop has
966 /// finished to see if there are any iterations left to execute by either
967 /// the vector epilogue or the scalar epilogue.
968 BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
969 BasicBlock *Bypass,
970 BasicBlock *Insert);
971 void printDebugTracesAtStart() override;
972 void printDebugTracesAtEnd() override;
973};
974} // end namespace llvm
975
976/// Look for a meaningful debug location on the instruction or its
977/// operands.
978static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
979 if (!I)
980 return I;
981
982 DebugLoc Empty;
983 if (I->getDebugLoc() != Empty)
984 return I;
985
986 for (Use &Op : I->operands()) {
987 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
988 if (OpInst->getDebugLoc() != Empty)
989 return OpInst;
990 }
991
992 return I;
993}
994
995void InnerLoopVectorizer::setDebugLocFromInst(
996 const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
997 IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
998 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
999 const DILocation *DIL = Inst->getDebugLoc();
1000
1001 // When a FSDiscriminator is enabled, we don't need to add the multiply
1002 // factors to the discriminators.
1003 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1004 !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1005 // FIXME: For scalable vectors, assume vscale=1.
1006 auto NewDIL =
1007 DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1008 if (NewDIL)
1009 B->SetCurrentDebugLocation(NewDIL.getValue());
1010 else
1011        LLVM_DEBUG(dbgs()
1012                   << "Failed to create new discriminator: "
1013                   << DIL->getFilename() << " Line: " << DIL->getLine());
1014 } else
1015 B->SetCurrentDebugLocation(DIL);
1016 } else
1017 B->SetCurrentDebugLocation(DebugLoc());
1018}
1019
1020/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1021/// is passed, the message relates to that particular instruction.
1022#ifndef NDEBUG
1023static void debugVectorizationMessage(const StringRef Prefix,
1024 const StringRef DebugMsg,
1025 Instruction *I) {
1026 dbgs() << "LV: " << Prefix << DebugMsg;
1027 if (I != nullptr)
1028 dbgs() << " " << *I;
1029 else
1030 dbgs() << '.';
1031 dbgs() << '\n';
1032}
1033#endif
1034
1035/// Create an analysis remark that explains why vectorization failed
1036///
1037/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
1038/// RemarkName is the identifier for the remark. If \p I is passed it is an
1039/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
1040/// the location of the remark. \return the remark object that can be
1041/// streamed to.
1042static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1043 StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1044 Value *CodeRegion = TheLoop->getHeader();
1045 DebugLoc DL = TheLoop->getStartLoc();
1046
1047 if (I) {
1048 CodeRegion = I->getParent();
1049 // If there is no debug location attached to the instruction, revert back to
1050 // using the loop's.
1051 if (I->getDebugLoc())
1052 DL = I->getDebugLoc();
1053 }
1054
1055 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1056}
1057
1058namespace llvm {
1059
1060/// Return a value for Step multiplied by VF.
1061Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1062 int64_t Step) {
1063  assert(Ty->isIntegerTy() && "Expected an integer step");
1064 Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1065 return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1066}
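
A hedged illustration (the builder and the chosen factors are assumptions): for a fixed VF of 4 and Step 2 this folds to the constant 8, while for a scalable VF of 4 it emits "vscale * 8" via CreateVScale, resolved at runtime.

  Value *Step = createStepForVF(Builder, Builder.getInt64Ty(),
                                ElementCount::getScalable(4), /*Step=*/2);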
1067
1068/// Return the runtime value for VF.
1069Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1070 Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1071 return VF.isScalable() ? B.CreateVScale(EC) : EC;
1072}
1073
1074static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1075  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1076 Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1077 Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1078 return B.CreateUIToFP(RuntimeVF, FTy);
1079}
1080
1081void reportVectorizationFailure(const StringRef DebugMsg,
1082 const StringRef OREMsg, const StringRef ORETag,
1083 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1084 Instruction *I) {
1085  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1086 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1087 ORE->emit(
1088 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1089 << "loop not vectorized: " << OREMsg);
1090}
1091
1092void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1093 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1094 Instruction *I) {
1095  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1096 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1097 ORE->emit(
1098 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1099 << Msg);
1100}
1101
1102} // end namespace llvm
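For orientation, a hedged sketch of a typical call site follows; the strings, remark tag, and the UnsupportedCall instruction are made-up placeholders, but the parameter roles match the signatures above:

// Hypothetical call site: under -debug this prints
// "LV: Not vectorizing: Found an unsupported call.", and it emits an
// analysis remark "loop not vectorized: call cannot be widened".
reportVectorizationFailure("Found an unsupported call",
                           "call cannot be widened", "CantWidenCall",
                           ORE, TheLoop, &UnsupportedCall);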
1103
1104#ifndef NDEBUG
1105/// \return string containing a file name and a line # for the given loop.
1106static std::string getDebugLocString(const Loop *L) {
1107 std::string Result;
1108 if (L) {
1109 raw_string_ostream OS(Result);
1110 if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1111 LoopDbgLoc.print(OS);
1112 else
1113 // Just print the module name.
1114 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1115 OS.flush();
1116 }
1117 return Result;
1118}
1119#endif
1120
1121void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1122 const Instruction *Orig) {
1123 // If the loop was versioned with memchecks, add the corresponding no-alias
1124 // metadata.
1125 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1126 LVer->annotateInstWithNoAlias(To, Orig);
1127}
1128
1129void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1130 VPTransformState &State) {
1131
1132 // Collect recipes in the backward slice of `Root` that may generate a poison
1133 // value that is used after vectorization.
1134 SmallPtrSet<VPRecipeBase *, 16> Visited;
1135 auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1136 SmallVector<VPRecipeBase *, 16> Worklist;
1137 Worklist.push_back(Root);
1138
1139 // Traverse the backward slice of Root through its use-def chain.
1140 while (!Worklist.empty()) {
1141 VPRecipeBase *CurRec = Worklist.back();
1142 Worklist.pop_back();
1143
1144 if (!Visited.insert(CurRec).second)
1145 continue;
1146
1147 // Prune search if we find another recipe generating a widen memory
1148 // instruction. Widen memory instructions involved in address computation
1149 // will lead to gather/scatter instructions, which don't need to be
1150 // handled.
1151 if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1152 isa<VPInterleaveRecipe>(CurRec) ||
1153 isa<VPCanonicalIVPHIRecipe>(CurRec))
1154 continue;
1155
1156 // This recipe contributes to the address computation of a widen
1157 // load/store. Collect the recipe if its underlying instruction has
1158 // poison-generating flags.
1159 Instruction *Instr = CurRec->getUnderlyingInstr();
1160 if (Instr && Instr->hasPoisonGeneratingFlags())
1161 State.MayGeneratePoisonRecipes.insert(CurRec);
1162
1163 // Add new definitions to the worklist.
1164 for (VPValue *operand : CurRec->operands())
1165 if (VPDef *OpDef = operand->getDef())
1166 Worklist.push_back(cast<VPRecipeBase>(OpDef));
1167 }
1168 });
1169
1170 // Traverse all the recipes in the VPlan and collect the poison-generating
1171 // recipes in the backward slice starting at the address of a
1172 // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
1173 auto Iter = depth_first(
1174 VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1175 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1176 for (VPRecipeBase &Recipe : *VPBB) {
1177 if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1178 Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1179 VPDef *AddrDef = WidenRec->getAddr()->getDef();
1180 if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1181 Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1182 collectPoisonGeneratingInstrsInBackwardSlice(
1183 cast<VPRecipeBase>(AddrDef));
1184 } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1185 VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1186 if (AddrDef) {
1187 // Check if any member of the interleave group needs predication.
1188 const InterleaveGroup<Instruction> *InterGroup =
1189 InterleaveRec->getInterleaveGroup();
1190 bool NeedPredication = false;
1191 for (int I = 0, NumMembers = InterGroup->getNumMembers();
1192 I < NumMembers; ++I) {
1193 Instruction *Member = InterGroup->getMember(I);
1194 if (Member)
1195 NeedPredication |=
1196 Legal->blockNeedsPredication(Member->getParent());
1197 }
1198
1199 if (NeedPredication)
1200 collectPoisonGeneratingInstrsInBackwardSlice(
1201 cast<VPRecipeBase>(AddrDef));
1202 }
1203 }
1204 }
1205 }
1206}
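The traversal above is a standard worklist-based backward-slice walk. The following self-contained sketch shows the same pattern over a toy dependence graph in place of VPlan recipes (Node and MayGeneratePoison are illustrative names, not LLVM types):

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node *> Operands;   // use-def edges, walked backwards
  bool MayGeneratePoison = false; // stands in for poison-generating flags
};

// Collect every poison-flagged node reachable backwards from Root.
void collectBackwardSlice(Node *Root, std::unordered_set<Node *> &Out) {
  std::vector<Node *> Worklist{Root};
  std::unordered_set<Node *> Visited;
  while (!Worklist.empty()) {
    Node *Cur = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(Cur).second)
      continue; // already processed
    if (Cur->MayGeneratePoison)
      Out.insert(Cur);
    for (Node *Op : Cur->Operands)
      Worklist.push_back(Op);
  }
}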
1207
1208void InnerLoopVectorizer::addMetadata(Instruction *To,
1209 Instruction *From) {
1210 propagateMetadata(To, From);
1211 addNewMetadata(To, From);
1212}
1213
1214void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1215 Instruction *From) {
1216 for (Value *V : To) {
1217 if (Instruction *I = dyn_cast<Instruction>(V))
1218 addMetadata(I, From);
1219 }
1220}
1221
1222namespace llvm {
1223
1224 // Hints from the loop vectorization cost model on how the scalar epilogue
1225 // loop should be lowered.
1226enum ScalarEpilogueLowering {
1227
1228 // The default: allowing scalar epilogues.
1229 CM_ScalarEpilogueAllowed,
1230
1231 // Vectorization with OptForSize: don't allow epilogues.
1232 CM_ScalarEpilogueNotAllowedOptSize,
1233
1234 // A special case of vectorization with OptForSize: loops with a very small
1235 // trip count are considered for vectorization under OptForSize, thereby
1236 // making sure the cost of their loop body is dominant, free of runtime
1237 // guards and scalar iteration overheads.
1238 CM_ScalarEpilogueNotAllowedLowTripLoop,
1239
1240 // Loop hint predicate indicating an epilogue is undesired.
1241 CM_ScalarEpilogueNotNeededUsePredicate,
1242
1243 // Directive indicating we must either tail-fold or not vectorize.
1244 CM_ScalarEpilogueNotAllowedUsePredicate
1245};
1246
1247/// ElementCountComparator creates a total ordering for ElementCount
1248/// for the purposes of using it in a set structure.
1249struct ElementCountComparator {
1250 bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1251 return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1252 std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1253 }
1254};
1255using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
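A self-contained model of this ordering, using a (scalable, known-min) pair in place of llvm::ElementCount: std::pair already compares lexicographically, matching the tuple comparison above, so all fixed counts order before all scalable ones:

#include <iostream>
#include <set>
#include <utility>

using EC = std::pair<bool, unsigned>; // (isScalable, knownMinValue)

int main() {
  std::set<EC> VFs{{true, 4}, {false, 8}, {false, 2}, {true, 2}};
  for (const EC &V : VFs)
    std::cout << (V.first ? "vscale x " : "") << V.second << '\n';
  // Prints: 2, 8, vscale x 2, vscale x 4
}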
1256
1257/// LoopVectorizationCostModel - estimates the expected speedups due to
1258/// vectorization.
1259/// In many cases vectorization is not profitable. This can happen because of
1260/// a number of reasons. In this class we mainly attempt to predict the
1261/// expected speedup/slowdowns due to the supported instruction set. We use the
1262/// TargetTransformInfo to query the different backends for the cost of
1263/// different operations.
1264class LoopVectorizationCostModel {
1265public:
1266 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1267 PredicatedScalarEvolution &PSE, LoopInfo *LI,
1268 LoopVectorizationLegality *Legal,
1269 const TargetTransformInfo &TTI,
1270 const TargetLibraryInfo *TLI, DemandedBits *DB,
1271 AssumptionCache *AC,
1272 OptimizationRemarkEmitter *ORE, const Function *F,
1273 const LoopVectorizeHints *Hints,
1274 InterleavedAccessInfo &IAI)
1275 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1276 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1277 Hints(Hints), InterleaveInfo(IAI) {}
1278
1279 /// \return An upper bound for the vectorization factors (both fixed and
1280 /// scalable). If the factors are 0, vectorization and interleaving should be
1281 /// avoided up front.
1282 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1283
1284 /// \return True if runtime checks are required for vectorization, and false
1285 /// otherwise.
1286 bool runtimeChecksRequired();
1287
1288 /// \return The most profitable vectorization factor and the cost of that VF.
1289 /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1290 /// then this vectorization factor will be selected if vectorization is
1291 /// possible.
1292 VectorizationFactor
1293 selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1294
1295 VectorizationFactor
1296 selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1297 const LoopVectorizationPlanner &LVP);
1298
1299 /// Setup cost-based decisions for user vectorization factor.
1300 /// \return true if the UserVF is a feasible VF to be chosen.
1301 bool selectUserVectorizationFactor(ElementCount UserVF) {
1302 collectUniformsAndScalars(UserVF);
1303 collectInstsToScalarize(UserVF);
1304 return expectedCost(UserVF).first.isValid();
1305 }
1306
1307 /// \return The size (in bits) of the smallest and widest types in the code
1308 /// that needs to be vectorized. We ignore values that remain scalar such as
1309 /// 64 bit loop indices.
1310 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1311
1312 /// \return The desired interleave count.
1313 /// If interleave count has been specified by metadata it will be returned.
1314 /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1315 /// are the selected vectorization factor and the cost of the selected VF.
1316 unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1317
1318 /// A memory access instruction may be vectorized in more than one way; the
1319 /// form it takes after vectorization depends on cost.
1320 /// This function takes cost-based decisions for Load/Store instructions
1321 /// and collects them in a map. This decision map is used for building
1322 /// the lists of loop-uniform and loop-scalar instructions.
1323 /// The calculated cost is saved with the widening decision in order to
1324 /// avoid redundant calculations.
1325 void setCostBasedWideningDecision(ElementCount VF);
1326
1327 /// A struct that represents some properties of the register usage
1328 /// of a loop.
1329 struct RegisterUsage {
1330 /// Holds the number of loop invariant values that are used in the loop.
1331 /// The key is ClassID of target-provided register class.
1332 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1333 /// Holds the maximum number of concurrent live intervals in the loop.
1334 /// The key is ClassID of target-provided register class.
1335 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1336 };
1337
1338 /// \return Information about the register usage of the loop for the
1339 /// given vectorization factors.
1340 SmallVector<RegisterUsage, 8>
1341 calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1342
1343 /// Collect values we want to ignore in the cost model.
1344 void collectValuesToIgnore();
1345
1346 /// Collect all element types in the loop for which widening is needed.
1347 void collectElementTypesForWidening();
1348
1349 /// Split reductions into those that happen in the loop, and those that happen
1350 /// outside. In-loop reductions are collected into InLoopReductionChains.
1351 void collectInLoopReductions();
1352
1353 /// Returns true if we should use strict in-order reductions for the given
1354 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1355 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1356 /// of FP operations.
1357 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1358 return !Hints->allowReordering() && RdxDesc.isOrdered();
1359 }
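Why ordering matters here: floating-point addition is not associative, so a reassociated (vectorized) reduction can round differently than the strict in-order scalar loop. A self-contained demonstration:

#include <cstdio>

int main() {
  float A[] = {1e8f, 1.0f, -1e8f, 1.0f};
  // Strict left-to-right order: 1e8f + 1.0f rounds back to 1e8f.
  float InOrder = ((A[0] + A[1]) + A[2]) + A[3]; // 1.0f
  // Reassociated order, as a 2-lane reduction might compute it.
  float Reassoc = (A[0] + A[2]) + (A[1] + A[3]); // 2.0f
  std::printf("%f vs %f\n", InOrder, Reassoc);
}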
1360
1361 /// \returns The smallest bitwidth each instruction can be represented with.
1362 /// The vector equivalents of these instructions should be truncated to this
1363 /// type.
1364 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1365 return MinBWs;
1366 }
1367
1368 /// \returns True if it is more profitable to scalarize instruction \p I for
1369 /// vectorization factor \p VF.
1370 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1371 assert(VF.isVector() &&
1372 "Profitable to scalarize relevant only for VF > 1.");
1373
1374 // Cost model is not run in the VPlan-native path - return conservative
1375 // result until this changes.
1376 if (EnableVPlanNativePath)
1377 return false;
1378
1379 auto Scalars = InstsToScalarize.find(VF);
1380 assert(Scalars != InstsToScalarize.end() &&
1381 "VF not yet analyzed for scalarization profitability");
1382 return Scalars->second.find(I) != Scalars->second.end();
1383 }
1384
1385 /// Returns true if \p I is known to be uniform after vectorization.
1386 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1387 if (VF.isScalar())
1388 return true;
1389
1390 // Cost model is not run in the VPlan-native path - return conservative
1391 // result until this changes.
1392 if (EnableVPlanNativePath)
1393 return false;
1394
1395 auto UniformsPerVF = Uniforms.find(VF);
1396 assert(UniformsPerVF != Uniforms.end() &&
1397 "VF not yet analyzed for uniformity");
1398 return UniformsPerVF->second.count(I);
1399 }
1400
1401 /// Returns true if \p I is known to be scalar after vectorization.
1402 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1403 if (VF.isScalar())
1404 return true;
1405
1406 // Cost model is not run in the VPlan-native path - return conservative
1407 // result until this changes.
1408 if (EnableVPlanNativePath)
1409 return false;
1410
1411 auto ScalarsPerVF = Scalars.find(VF);
1412 assert(ScalarsPerVF != Scalars.end() &&
1413 "Scalar values are not calculated for VF");
1414 return ScalarsPerVF->second.count(I);
1415 }
1416
1417 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1418 /// for vectorization factor \p VF.
1419 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1420 return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1421 !isProfitableToScalarize(I, VF) &&
1422 !isScalarAfterVectorization(I, VF);
1423 }
1424
1425 /// Decision that was taken during cost calculation for memory instruction.
1426 enum InstWidening {
1427 CM_Unknown,
1428 CM_Widen, // For consecutive accesses with stride +1.
1429 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1430 CM_Interleave,
1431 CM_GatherScatter,
1432 CM_Scalarize
1433 };
1434
1435 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1436 /// instruction \p I and vector width \p VF.
1437 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1438 InstructionCost Cost) {
1439 assert(VF.isVector() && "Expected VF >=2");
1440 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1441 }
1442
1443 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1444 /// interleaving group \p Grp and vector width \p VF.
1445 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1446 ElementCount VF, InstWidening W,
1447 InstructionCost Cost) {
1448 assert(VF.isVector() && "Expected VF >=2");
1449 /// Broadcast this decision to all instructions inside the group.
1450 /// But the cost will be assigned to one instruction only.
1451 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1452 if (auto *I = Grp->getMember(i)) {
1453 if (Grp->getInsertPos() == I)
1454 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1455 else
1456 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1457 }
1458 }
1459 }
1460
1461 /// Return the cost model decision for the given instruction \p I and vector
1462 /// width \p VF. Return CM_Unknown if this instruction did not pass
1463 /// through the cost modeling.
1464 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1465 assert(VF.isVector() && "Expected VF to be a vector VF");
1466 // Cost model is not run in the VPlan-native path - return conservative
1467 // result until this changes.
1468 if (EnableVPlanNativePath)
1469 return CM_GatherScatter;
1470
1471 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1472 auto Itr = WideningDecisions.find(InstOnVF);
1473 if (Itr == WideningDecisions.end())
1474 return CM_Unknown;
1475 return Itr->second.first;
1476 }
1477
1478 /// Return the vectorization cost for the given instruction \p I and vector
1479 /// width \p VF.
1480 InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1481 assert(VF.isVector() && "Expected VF >=2");
1482 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1483 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1484 "The cost is not calculated");
1485 return WideningDecisions[InstOnVF].second;
1486 }
1487
1488 /// Return True if instruction \p I is an optimizable truncate whose operand
1489 /// is an induction variable. Such a truncate will be removed by adding a new
1490 /// induction variable with the destination type.
1491 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1492 // If the instruction is not a truncate, return false.
1493 auto *Trunc = dyn_cast<TruncInst>(I);
1494 if (!Trunc)
1495 return false;
1496
1497 // Get the source and destination types of the truncate.
1498 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1499 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1500
1501 // If the truncate is free for the given types, return false. Replacing a
1502 // free truncate with an induction variable would add an induction variable
1503 // update instruction to each iteration of the loop. We exclude from this
1504 // check the primary induction variable since it will need an update
1505 // instruction regardless.
1506 Value *Op = Trunc->getOperand(0);
1507 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1508 return false;
1509
1510 // If the truncated value is not an induction variable, return false.
1511 return Legal->isInductionPhi(Op);
1512 }
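Source-level sketch of the pattern this looks for: the 64-bit induction variable i is truncated to 32 bits on every iteration, so the vectorizer can instead introduce a second, 32-bit induction with no per-iteration trunc (an illustrative example, not taken from a test case):

void storeIota(int *A, long N) {
  for (long i = 0; i < N; ++i)
    A[i] = (int)i; // optimizable truncate of the induction variable
}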
1513
1514 /// Collects the instructions to scalarize for each predicated instruction in
1515 /// the loop.
1516 void collectInstsToScalarize(ElementCount VF);
1517
1518 /// Collect Uniform and Scalar values for the given \p VF.
1519 /// The sets depend on CM decision for Load/Store instructions
1520 /// that may be vectorized as interleave, gather-scatter or scalarized.
1521 void collectUniformsAndScalars(ElementCount VF) {
1522 // Do the analysis once.
1523 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1524 return;
1525 setCostBasedWideningDecision(VF);
1526 collectLoopUniforms(VF);
1527 collectLoopScalars(VF);
1528 }
1529
1530 /// Returns true if the target machine supports masked store operation
1531 /// for the given \p DataType and kind of access to \p Ptr.
1532 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1533 return Legal->isConsecutivePtr(DataType, Ptr) &&
1534 TTI.isLegalMaskedStore(DataType, Alignment);
1535 }
1536
1537 /// Returns true if the target machine supports masked load operation
1538 /// for the given \p DataType and kind of access to \p Ptr.
1539 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1540 return Legal->isConsecutivePtr(DataType, Ptr) &&
1541 TTI.isLegalMaskedLoad(DataType, Alignment);
1542 }
1543
1544 /// Returns true if the target machine can represent \p V as a masked gather
1545 /// or scatter operation.
1546 bool isLegalGatherOrScatter(Value *V,
1547 ElementCount VF = ElementCount::getFixed(1)) {
1548 bool LI = isa<LoadInst>(V);
1549 bool SI = isa<StoreInst>(V);
1550 if (!LI && !SI)
1551 return false;
1552 auto *Ty = getLoadStoreType(V);
1553 Align Align = getLoadStoreAlignment(V);
1554 if (VF.isVector())
1555 Ty = VectorType::get(Ty, VF);
1556 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1557 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1558 }
1559
1560 /// Returns true if the target machine supports all of the reduction
1561 /// variables found for the given VF.
1562 bool canVectorizeReductions(ElementCount VF) const {
1563 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1564 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1565 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1566 }));
1567 }
1568
1569 /// Returns true if \p I is an instruction that will be scalarized with
1570 /// predication when vectorizing \p I with vectorization factor \p VF. Such
1571 /// instructions include conditional stores and instructions that may divide
1572 /// by zero.
1573 bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1574
1575 // Returns true if \p I is an instruction that will be predicated either
1576 // through scalar predication or masked load/store or masked gather/scatter.
1577 // \p VF is the vectorization factor that will be used to vectorize \p I.
1578 // Superset of instructions that return true for isScalarWithPredication.
1579 bool isPredicatedInst(Instruction *I, ElementCount VF,
1580 bool IsKnownUniform = false) {
1581 // When we know the load is uniform and the original scalar loop was not
1582 // predicated, we don't need to mark it as a predicated instruction. Any
1583 // vectorized blocks created when tail-folding are artificial blocks we
1584 // have introduced, and we know there is always at least one active lane.
1585 // That's why we call Legal->blockNeedsPredication here: it doesn't
1586 // query tail-folding.
1587 if (IsKnownUniform && isa<LoadInst>(I) &&
1588 !Legal->blockNeedsPredication(I->getParent()))
1589 return false;
1590 if (!blockNeedsPredicationForAnyReason(I->getParent()))
1591 return false;
1592 // Loads and stores that need some form of masked operation are predicated
1593 // instructions.
1594 if (isa<LoadInst>(I) || isa<StoreInst>(I))
1595 return Legal->isMaskRequired(I);
1596 return isScalarWithPredication(I, VF);
1597 }
1598
1599 /// Returns true if \p I is a memory instruction with consecutive memory
1600 /// access that can be widened.
1601 bool
1602 memoryInstructionCanBeWidened(Instruction *I,
1603 ElementCount VF = ElementCount::getFixed(1));
1604
1605 /// Returns true if \p I is a memory instruction in an interleaved-group
1606 /// of memory accesses that can be vectorized with wide vector loads/stores
1607 /// and shuffles.
1608 bool
1609 interleavedAccessCanBeWidened(Instruction *I,
1610 ElementCount VF = ElementCount::getFixed(1));
1611
1612 /// Check if \p Instr belongs to any interleaved access group.
1613 bool isAccessInterleaved(Instruction *Instr) {
1614 return InterleaveInfo.isInterleaved(Instr);
1615 }
1616
1617 /// Get the interleaved access group that \p Instr belongs to.
1618 const InterleaveGroup<Instruction> *
1619 getInterleavedAccessGroup(Instruction *Instr) {
1620 return InterleaveInfo.getInterleaveGroup(Instr);
1621 }
1622
1623 /// Returns true if we're required to use a scalar epilogue for at least
1624 /// the final iteration of the original loop.
1625 bool requiresScalarEpilogue(ElementCount VF) const {
1626 if (!isScalarEpilogueAllowed())
1627 return false;
1629 // If we might exit from anywhere but the latch, we must run the exiting
1630 // iteration in scalar form.
1630 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1631 return true;
1632 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1633 }
1634
1635 /// Returns true if a scalar epilogue is allowed, i.e. not prevented by
1636 /// optsize or a loop hint annotation.
1637 bool isScalarEpilogueAllowed() const {
1638 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1639 }
1640
1641 /// Returns true if all loop blocks should be masked to fold the tail of the loop.
1642 bool foldTailByMasking() const { return FoldTailByMasking; }
1643
1644 /// Returns true if the instructions in this block require predication
1645 /// for any reason, e.g. because tail folding now requires a predicate
1646 /// or because the block in the original loop was predicated.
1647 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1648 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1649 }
1650
1651 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1652 /// nodes to the chain of instructions representing the reductions. Uses a
1653 /// MapVector to ensure deterministic iteration order.
1654 using ReductionChainMap =
1655 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1656
1657 /// Return the chain of instructions representing an inloop reduction.
1658 const ReductionChainMap &getInLoopReductionChains() const {
1659 return InLoopReductionChains;
1660 }
1661
1662 /// Returns true if the Phi is part of an inloop reduction.
1663 bool isInLoopReduction(PHINode *Phi) const {
1664 return InLoopReductionChains.count(Phi);
1665 }
1666
1667 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1668 /// with factor VF. Return the cost of the instruction, including
1669 /// scalarization overhead if it's needed.
1670 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1671
1672 /// Estimate cost of a call instruction CI if it were vectorized with factor
1673 /// VF. Return the cost of the instruction, including scalarization overhead
1674 /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1675 /// scalarized -
1676 /// i.e. either a vector version isn't available, or it is too expensive.
1677 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1678 bool &NeedToScalarize) const;
1679
1680 /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1681 /// that of B.
1682 bool isMoreProfitable(const VectorizationFactor &A,
1683 const VectorizationFactor &B) const;
1684
1685 /// Invalidates decisions already taken by the cost model.
1686 void invalidateCostModelingDecisions() {
1687 WideningDecisions.clear();
1688 Uniforms.clear();
1689 Scalars.clear();
1690 }
1691
1692private:
1693 unsigned NumPredStores = 0;
1694
1695 /// \return An upper bound for the vectorization factors for both
1696 /// fixed and scalable vectorization, where the minimum-known number of
1697 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1698 /// disabled or unsupported, then the scalable part will be equal to
1699 /// ElementCount::getScalable(0).
1700 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1701 ElementCount UserVF,
1702 bool FoldTailByMasking);
1703
1704 /// \return the maximized element count based on the target's vector
1705 /// registers and the loop trip-count, but limited to a maximum safe VF.
1706 /// This is a helper function of computeFeasibleMaxVF.
1707 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1708 /// issue that occurred on one of the buildbots which cannot be reproduced
1709 /// without having access to the proprietary compiler (see comments on
1710 /// D98509). The issue is currently under investigation and this workaround
1711 /// will be removed as soon as possible.
1712 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1713 unsigned SmallestType,
1714 unsigned WidestType,
1715 const ElementCount &MaxSafeVF,
1716 bool FoldTailByMasking);
1717
1718 /// \return the maximum legal scalable VF, based on the safe max number
1719 /// of elements.
1720 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1721
1722 /// The vectorization cost is a combination of the cost itself and a boolean
1723 /// indicating whether any of the contributing operations will actually
1724 /// operate on vector values after type legalization in the backend. If this
1725 /// latter value is false, then all operations will be scalarized (i.e. no
1726 /// vectorization has actually taken place).
1727 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1728
1729 /// Returns the expected execution cost. The unit of the cost does
1730 /// not matter because we use the 'cost' units to compare different
1731 /// vector widths. The cost that is returned is *not* normalized by
1732 /// the factor width. If \p Invalid is not nullptr, this function
1733 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1734 /// each instruction that has an Invalid cost for the given VF.
1735 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1736 VectorizationCostTy
1737 expectedCost(ElementCount VF,
1738 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1739
1740 /// Returns the execution time cost of an instruction for a given vector
1741 /// width. Vector width of one means scalar.
1742 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1743
1744 /// The cost-computation logic from getInstructionCost which provides
1745 /// the vector type as an output parameter.
1746 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1747 Type *&VectorTy);
1748
1749 /// Return the cost of instructions in an inloop reduction pattern, if I is
1750 /// part of that pattern.
1751 Optional<InstructionCost>
1752 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1753 TTI::TargetCostKind CostKind);
1754
1755 /// Calculate vectorization cost of memory instruction \p I.
1756 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1757
1758 /// The cost computation for scalarized memory instruction.
1759 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1760
1761 /// The cost computation for interleaving group of memory instructions.
1762 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1763
1764 /// The cost computation for Gather/Scatter instruction.
1765 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1766
1767 /// The cost computation for widening instruction \p I with consecutive
1768 /// memory access.
1769 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1770
1771 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1772 /// Load: scalar load + broadcast.
1773 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1774 /// element)
1775 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1776
1777 /// Estimate the overhead of scalarizing an instruction. This is a
1778 /// convenience wrapper for the type-based getScalarizationOverhead API.
1779 InstructionCost getScalarizationOverhead(Instruction *I,
1780 ElementCount VF) const;
1781
1782 /// Returns whether the instruction is a load or store and will be emitted
1783 /// as a vector operation.
1784 bool isConsecutiveLoadOrStore(Instruction *I);
1785
1786 /// Returns true if an artificially high cost for emulated masked memrefs
1787 /// should be used.
1788 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1789
1790 /// Map of scalar integer values to the smallest bitwidth they can be legally
1791 /// represented as. The vector equivalents of these values should be truncated
1792 /// to this type.
1793 MapVector<Instruction *, uint64_t> MinBWs;
1794
1795 /// A type representing the costs for instructions if they were to be
1796 /// scalarized rather than vectorized. The entries are Instruction-Cost
1797 /// pairs.
1798 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1799
1800 /// A set containing all BasicBlocks that are known to be present after
1801 /// vectorization as predicated blocks.
1802 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1803
1804 /// Records whether it is allowed to have the original scalar loop execute at
1805 /// least once. This may be needed as a fallback loop in case runtime
1806 /// aliasing/dependence checks fail, or to handle the tail/remainder
1807 /// iterations when the trip count is unknown or is not a multiple of the VF,
1808 /// or as a peel-loop to handle gaps in interleave-groups.
1809 /// Under optsize and when the trip count is very small we don't allow any
1810 /// iterations to execute in the scalar loop.
1811 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1812
1813 /// All blocks of the loop are to be masked to fold the tail of the scalar iterations.
1814 bool FoldTailByMasking = false;
1815
1816 /// A map holding scalar costs for different vectorization factors. The
1817 /// presence of a cost for an instruction in the mapping indicates that the
1818 /// instruction will be scalarized when vectorizing with the associated
1819 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1820 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1821
1822 /// Holds the instructions known to be uniform after vectorization.
1823 /// The data is collected per VF.
1824 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1825
1826 /// Holds the instructions known to be scalar after vectorization.
1827 /// The data is collected per VF.
1828 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1829
1830 /// Holds the instructions (address computations) that are forced to be
1831 /// scalarized.
1832 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1833
1834 /// PHINodes of the reductions that should be expanded in-loop along with
1835 /// their associated chains of reduction operations, in program order from top
1836 /// (PHI) to bottom.
1837 ReductionChainMap InLoopReductionChains;
1838
1839 /// A Map of inloop reduction operations and their immediate chain operand.
1840 /// FIXME: This can be removed once reductions can be costed correctly in
1841 /// vplan. This was added to allow quick lookup to the inloop operations,
1842 /// without having to loop through InLoopReductionChains.
1843 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1844
1845 /// Returns the expected difference in cost from scalarizing the expression
1846 /// feeding a predicated instruction \p PredInst. The instructions to
1847 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1848 /// non-negative return value implies the expression will be scalarized.
1849 /// Currently, only single-use chains are considered for scalarization.
1850 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1851 ElementCount VF);
1852
1853 /// Collect the instructions that are uniform after vectorization. An
1854 /// instruction is uniform if we represent it with a single scalar value in
1855 /// the vectorized loop corresponding to each vector iteration. Examples of
1856 /// uniform instructions include pointer operands of consecutive or
1857 /// interleaved memory accesses. Note that although uniformity implies an
1858 /// instruction will be scalar, the reverse is not true. In general, a
1859 /// scalarized instruction will be represented by VF scalar values in the
1860 /// vectorized loop, each corresponding to an iteration of the original
1861 /// scalar loop.
1862 void collectLoopUniforms(ElementCount VF);
1863
1864 /// Collect the instructions that are scalar after vectorization. An
1865 /// instruction is scalar if it is known to be uniform or will be scalarized
1866 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1867 /// to the list if they are used by a load/store instruction that is marked as
1868 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1869 /// VF values in the vectorized loop, each corresponding to an iteration of
1870 /// the original scalar loop.
1871 void collectLoopScalars(ElementCount VF);
1872
1873 /// Keeps cost model vectorization decisions and costs for instructions.
1874 /// Right now it is used for memory instructions only.
1875 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1876 std::pair<InstWidening, InstructionCost>>;
1877
1878 DecisionList WideningDecisions;
1879
1880 /// Returns true if \p V is expected to be vectorized and it needs to be
1881 /// extracted.
1882 bool needsExtract(Value *V, ElementCount VF) const {
1883 Instruction *I = dyn_cast<Instruction>(V);
1884 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1885 TheLoop->isLoopInvariant(I))
1886 return false;
1887
1888 // Assume we can vectorize V (and hence we need extraction) if the
1889 // scalars are not computed yet. This can happen, because it is called
1890 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1891 // the scalars are collected. That should be a safe assumption in most
1892 // cases, because we check if the operands have vectorizable types
1893 // beforehand in LoopVectorizationLegality.
1894 return Scalars.find(VF) == Scalars.end() ||
1895 !isScalarAfterVectorization(I, VF);
1896 };
1897
1898 /// Returns a range containing only operands needing to be extracted.
1899 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1900 ElementCount VF) const {
1901 return SmallVector<Value *, 4>(make_filter_range(
1902 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1903 }
1904
1905 /// Determines if we have the infrastructure to vectorize loop \p L and its
1906 /// epilogue, assuming the main loop is vectorized by \p VF.
1907 bool isCandidateForEpilogueVectorization(const Loop &L,
1908 const ElementCount VF) const;
1909
1910 /// Returns true if epilogue vectorization is considered profitable, and
1911 /// false otherwise.
1912 /// \p VF is the vectorization factor chosen for the original loop.
1913 bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1914
1915public:
1916 /// The loop that we evaluate.
1917 Loop *TheLoop;
1918
1919 /// Predicated scalar evolution analysis.
1920 PredicatedScalarEvolution &PSE;
1921
1922 /// Loop Info analysis.
1923 LoopInfo *LI;
1924
1925 /// Vectorization legality.
1926 LoopVectorizationLegality *Legal;
1927
1928 /// Vector target information.
1929 const TargetTransformInfo &TTI;
1930
1931 /// Target Library Info.
1932 const TargetLibraryInfo *TLI;
1933
1934 /// Demanded bits analysis.
1935 DemandedBits *DB;
1936
1937 /// Assumption cache.
1938 AssumptionCache *AC;
1939
1940 /// Interface to emit optimization remarks.
1941 OptimizationRemarkEmitter *ORE;
1942
1943 const Function *TheFunction;
1944
1945 /// Loop Vectorize Hint.
1946 const LoopVectorizeHints *Hints;
1947
1948 /// The interleave access information contains groups of interleaved accesses
1949 /// with the same stride and close to each other.
1950 InterleavedAccessInfo &InterleaveInfo;
1951
1952 /// Values to ignore in the cost model.
1953 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1954
1955 /// Values to ignore in the cost model when VF > 1.
1956 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1957
1958 /// All element types found in the loop.
1959 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1960
1961 /// Profitable vector factors.
1962 SmallVector<VectorizationFactor, 8> ProfitableVFs;
1963};
1964} // end namespace llvm
1965
1966/// Helper struct to manage generating runtime checks for vectorization.
1967///
1968/// The runtime checks are created up-front in temporary blocks to allow better
1969 /// cost estimation, and are un-linked from the existing IR. After deciding to
1970/// vectorize, the checks are moved back. If deciding not to vectorize, the
1971/// temporary blocks are completely removed.
1972class GeneratedRTChecks {
1973 /// Basic block which contains the generated SCEV checks, if any.
1974 BasicBlock *SCEVCheckBlock = nullptr;
1975
1976 /// The value representing the result of the generated SCEV checks. If it is
1977 /// nullptr, either no SCEV checks have been generated or they have been used.
1978 Value *SCEVCheckCond = nullptr;
1979
1980 /// Basic block which contains the generated memory runtime checks, if any.
1981 BasicBlock *MemCheckBlock = nullptr;
1982
1983 /// The value representing the result of the generated memory runtime checks.
1984 /// If it is nullptr, either no memory runtime checks have been generated or
1985 /// they have been used.
1986 Value *MemRuntimeCheckCond = nullptr;
1987
1988 DominatorTree *DT;
1989 LoopInfo *LI;
1990
1991 SCEVExpander SCEVExp;
1992 SCEVExpander MemCheckExp;
1993
1994public:
1995 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1996 const DataLayout &DL)
1997 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1998 MemCheckExp(SE, DL, "scev.check") {}
1999
2000 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2001 /// accurately estimate the cost of the runtime checks. The blocks are
2002 /// un-linked from the IR and added back during vector code generation. If
2003 /// there is no vector code generation, the check blocks are removed
2004 /// completely.
2005 void Create(Loop *L, const LoopAccessInfo &LAI,
2006 const SCEVUnionPredicate &UnionPred) {
2007
2008 BasicBlock *LoopHeader = L->getHeader();
2009 BasicBlock *Preheader = L->getLoopPreheader();
2010
2011 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2012 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2013 // may be used by SCEVExpander. The blocks will be un-linked from their
2014 // predecessors and removed from LI & DT at the end of the function.
2015 if (!UnionPred.isAlwaysTrue()) {
2016 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2017 nullptr, "vector.scevcheck");
2018
2019 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2020 &UnionPred, SCEVCheckBlock->getTerminator());
2021 }
2022
2023 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2024 if (RtPtrChecking.Need) {
2025 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2026 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2027 "vector.memcheck");
2028
2029 MemRuntimeCheckCond =
2030 addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2031 RtPtrChecking.getChecks(), MemCheckExp);
2032 assert(MemRuntimeCheckCond &&
2033 "no RT checks generated although RtPtrChecking "
2034 "claimed checks are required");
2035 }
2036
2037 if (!MemCheckBlock && !SCEVCheckBlock)
2038 return;
2039
2040 // Unhook the temporary block with the checks, update various places
2041 // accordingly.
2042 if (SCEVCheckBlock)
2043 SCEVCheckBlock->replaceAllUsesWith(Preheader);
2044 if (MemCheckBlock)
2045 MemCheckBlock->replaceAllUsesWith(Preheader);
2046
2047 if (SCEVCheckBlock) {
2048 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2049 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2050 Preheader->getTerminator()->eraseFromParent();
2051 }
2052 if (MemCheckBlock) {
2053 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2054 new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2055 Preheader->getTerminator()->eraseFromParent();
2056 }
2057
2058 DT->changeImmediateDominator(LoopHeader, Preheader);
2059 if (MemCheckBlock) {
2060 DT->eraseNode(MemCheckBlock);
2061 LI->removeBlock(MemCheckBlock);
2062 }
2063 if (SCEVCheckBlock) {
2064 DT->eraseNode(SCEVCheckBlock);
2065 LI->removeBlock(SCEVCheckBlock);
2066 }
2067 }
2068
2069 /// Remove the created SCEV & memory runtime check blocks & instructions, if
2070 /// unused.
2071 ~GeneratedRTChecks() {
2072 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2073 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2074 if (!SCEVCheckCond)
2075 SCEVCleaner.markResultUsed();
2076
2077 if (!MemRuntimeCheckCond)
2078 MemCheckCleaner.markResultUsed();
2079
2080 if (MemRuntimeCheckCond) {
2081 auto &SE = *MemCheckExp.getSE();
2082 // Memory runtime check generation creates compares that use expanded
2083 // values. Remove them before running the SCEVExpanderCleaners.
2084 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2085 if (MemCheckExp.isInsertedInstruction(&I))
2086 continue;
2087 SE.forgetValue(&I);
2088 I.eraseFromParent();
2089 }
2090 }
2091 MemCheckCleaner.cleanup();
2092 SCEVCleaner.cleanup();
2093
2094 if (SCEVCheckCond)
2095 SCEVCheckBlock->eraseFromParent();
2096 if (MemRuntimeCheckCond)
2097 MemCheckBlock->eraseFromParent();
2098 }
2099
2100 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2101 /// adjusts the branches to branch to the vector preheader or \p Bypass,
2102 /// depending on the generated condition.
2103 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2104 BasicBlock *LoopVectorPreHeader,
2105 BasicBlock *LoopExitBlock) {
2106 if (!SCEVCheckCond)
2107 return nullptr;
2108 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2109 if (C->isZero())
2110 return nullptr;
2111
2112 auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113
2114 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2115 // Create new preheader for vector loop.
2116 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2117 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2118
2119 SCEVCheckBlock->getTerminator()->eraseFromParent();
2120 SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2121 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2122 SCEVCheckBlock);
2123
2124 DT->addNewBlock(SCEVCheckBlock, Pred);
2125 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2126
2127 ReplaceInstWithInst(
2128 SCEVCheckBlock->getTerminator(),
2129 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2130 // Mark the check as used, to prevent it from being removed during cleanup.
2131 SCEVCheckCond = nullptr;
2132 return SCEVCheckBlock;
2133 }
2134
2135 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2136 /// the branches to branch to the vector preheader or \p Bypass, depending on
2137 /// the generated condition.
2138 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2139 BasicBlock *LoopVectorPreHeader) {
2140 // Check if we generated code that checks at runtime whether arrays overlap.
2141 if (!MemRuntimeCheckCond)
2142 return nullptr;
2143
2144 auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2145 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2146 MemCheckBlock);
2147
2148 DT->addNewBlock(MemCheckBlock, Pred);
2149 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2150 MemCheckBlock->moveBefore(LoopVectorPreHeader);
2151
2152 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2153 PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2154
2155 ReplaceInstWithInst(
2156 MemCheckBlock->getTerminator(),
2157 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2158 MemCheckBlock->getTerminator()->setDebugLoc(
2159 Pred->getTerminator()->getDebugLoc());
2160
2161 // Mark the check as used, to prevent it from being removed during cleanup.
2162 MemRuntimeCheckCond = nullptr;
2163 return MemCheckBlock;
2164 }
2165};
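A hedged sketch of the lifecycle described above, assuming SE, DT, LI, DL, the loop L, its LoopAccessInfo LAI, and the predicate UnionPred are in scope as in the vectorizer driver:

GeneratedRTChecks Checks(SE, DT, LI, DL);
Checks.Create(L, LAI, UnionPred); // checks built in detached blocks
// ...cost model runs; if we vectorize, emitSCEVChecks/emitMemRuntimeChecks
// re-link the blocks and null out the conditions to mark them used;
// otherwise ~GeneratedRTChecks erases the blocks and their SCEV expansions.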
2166
2167 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2168 // vectorization. The loop needs to be annotated with #pragma omp simd
2169 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
2170 // the vector length information is not provided, vectorization is not
2171 // considered explicit. Interleave hints are not allowed either. These
2172 // limitations will be relaxed in the future.
2173 // Please note that we are currently forced to abuse the semantics of pragma
2174 // 'clang loop vectorize'. This pragma provides *auto-vectorization hints*
2175 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2176 // provides *explicit vectorization hints* (LV can bypass legality checks and
2177 // assume that vectorization is legal). However, both hints are implemented
2178 // using the same metadata (llvm.loop.vectorize, processed by
2179 // LoopVectorizeHints). This will be fixed in the future when the native IR
2180 // representation for pragma 'omp simd' is introduced.
2181static bool isExplicitVecOuterLoop(Loop *OuterLp,
2182 OptimizationRemarkEmitter *ORE) {
2183 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2184 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2185
2186 // Only outer loops with an explicit vectorization hint are supported.
2187 // Unannotated outer loops are ignored.
2188 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2189 return false;
2190
2191 Function *Fn = OuterLp->getHeader()->getParent();
2192 if (!Hints.allowVectorization(Fn, OuterLp,
2193 true /*VectorizeOnlyWhenForced*/)) {
2194 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2195 return false;
2196 }
2197
2198 if (Hints.getInterleave() > 1) {
2199 // TODO: Interleave support is future work.
2200 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2201 "outer loops.\n");
2202 Hints.emitRemarkWithHints();
2203 return false;
2204 }
2205
2206 return true;
2207}
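For reference, a source-level example of an outer loop annotated as described above (illustrative only; the width clause is what makes the vectorization explicit):

void scaleRows(float A[][256], int N) {
#pragma clang loop vectorize(enable) vectorize_width(4)
  for (int i = 0; i < N; ++i)     // annotated outer loop
    for (int j = 0; j < 256; ++j) // inner loop
      A[i][j] *= 2.0f;
}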
2208
2209static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2210 OptimizationRemarkEmitter *ORE,
2211 SmallVectorImpl<Loop *> &V) {
2212 // Collect inner loops and outer loops without irreducible control flow. For
2213 // now, only collect outer loops that have explicit vectorization hints. If we
2214 // are stress testing the VPlan H-CFG construction, we collect the outermost
2215 // loop of every loop nest.
2216 if (L.isInnermost() || VPlanBuildStressTest ||
2217 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2218 LoopBlocksRPO RPOT(&L);
2219 RPOT.perform(LI);
2220 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2221 V.push_back(&L);
2222 // TODO: Collect inner loops inside marked outer loops in case
2223 // vectorization fails for the outer loop. Do not invoke
2224 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2225 // already known to be reducible. We can use an inherited attribute for
2226 // that.
2227 return;
2228 }
2229 }
2230 for (Loop *InnerL : L)
2231 collectSupportedLoops(*InnerL, LI, ORE, V);
2232}
2233
2234namespace {
2235
2236/// The LoopVectorize Pass.
2237struct LoopVectorize : public FunctionPass {
2238 /// Pass identification, replacement for typeid
2239 static char ID;
2240
2241 LoopVectorizePass Impl;
2242
2243 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2244 bool VectorizeOnlyWhenForced = false)
2245 : FunctionPass(ID),
2246 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2247 initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2248 }
2249
2250 bool runOnFunction(Function &F) override {
2251 if (skipFunction(F))
2252 return false;
2253
2254 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2255 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2256 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2257 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2258 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2259 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2260 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2261 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2262 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2263 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2264 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2265 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2266 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2267
2268 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2269 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2270
2271 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2272 GetLAA, *ORE, PSI).MadeAnyChange;
2273 }
2274
2275 void getAnalysisUsage(AnalysisUsage &AU) const override {
2276 AU.addRequired<AssumptionCacheTracker>();
2277 AU.addRequired<BlockFrequencyInfoWrapperPass>();
2278 AU.addRequired<DominatorTreeWrapperPass>();
2279 AU.addRequired<LoopInfoWrapperPass>();
2280 AU.addRequired<ScalarEvolutionWrapperPass>();
2281 AU.addRequired<TargetTransformInfoWrapperPass>();
2282 AU.addRequired<AAResultsWrapperPass>();
2283 AU.addRequired<LoopAccessLegacyAnalysis>();
2284 AU.addRequired<DemandedBitsWrapperPass>();
2285 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2286 AU.addRequired<InjectTLIMappingsLegacy>();
2287
2288 // We currently do not preserve loop info/dominator analyses with outer loop
2289 // vectorization. Until this is addressed, mark these analyses as preserved
2290 // only for the non-VPlan-native path.
2291 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2292 if (!EnableVPlanNativePath) {
2293 AU.addPreserved<LoopInfoWrapperPass>();
2294 AU.addPreserved<DominatorTreeWrapperPass>();
2295 }
2296
2297 AU.addPreserved<BasicAAWrapperPass>();
2298 AU.addPreserved<GlobalsAAWrapperPass>();
2299 AU.addRequired<ProfileSummaryInfoWrapperPass>();
2300 }
2301};
2302
2303} // end anonymous namespace
2304
2305//===----------------------------------------------------------------------===//
2306// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2307// LoopVectorizationCostModel and LoopVectorizationPlanner.
2308//===----------------------------------------------------------------------===//
2309
2310Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2311 // We need to place the broadcast of invariant variables outside the loop,
2312 // but only if it's proven safe to do so. Otherwise, the broadcast is placed
2313 // inside the vector loop body.
2314 Instruction *Instr = dyn_cast<Instruction>(V);
2315 bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2316 (!Instr ||
2317 DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2318 // Place the code for broadcasting invariant variables in the new preheader.
2319 IRBuilder<>::InsertPointGuard Guard(Builder);
2320 if (SafeToHoist)
2321 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2322
2323 // Broadcast the scalar into all locations in the vector.
2324 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2325
2326 return Shuf;
2327}
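// For illustration only (names assumed, not from this code): with VF = 4 and
// an i32 invariant %v, the CreateVectorSplat call above emits roughly
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %v, i64 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer
// placed in the preheader when SafeToHoist holds, otherwise left in the body.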
2328
2329/// This function adds
2330/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2331 /// to each vector element of Val. The sequence starts at StartIdx.
2332 /// \p BinOp is only relevant for FP induction variables.
2333static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2334 Instruction::BinaryOps BinOp, ElementCount VF,
2335 IRBuilder<> &Builder) {
2336 assert(VF.isVector() && "only vector VFs are supported");
2337
2338 // Create and check the types.
2339 auto *ValVTy = cast<VectorType>(Val->getType());
2340 ElementCount VLen = ValVTy->getElementCount();
2341
2342 Type *STy = Val->getType()->getScalarType();
2343 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2344 "Induction Step must be an integer or FP");
2345 assert(Step->getType() == STy && "Step has wrong type");
2346
2347 SmallVector<Constant *, 8> Indices;
2348
2349 // Create a vector of consecutive numbers from zero to VF.
2350 VectorType *InitVecValVTy = ValVTy;
2351 Type *InitVecValSTy = STy;
2352 if (STy->isFloatingPointTy()) {
2353 InitVecValSTy =
2354 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2355 InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2356 }
2357 Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2358
2359 // Splat the StartIdx
2360 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2361
2362 if (STy->isIntegerTy()) {
2363 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2364 Step = Builder.CreateVectorSplat(VLen, Step);
2365 assert(Step->getType() == Val->getType() && "Invalid step vec");
2366 // FIXME: The newly created binary instructions should contain nsw/nuw
2367 // flags, which can be found from the original scalar operations.
2368 Step = Builder.CreateMul(InitVec, Step);
2369 return Builder.CreateAdd(Val, Step, "induction");
2370 }
2371
2372 // Floating point induction.
2373 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2374 "Binary Opcode should be specified for FP induction");
2375 InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2376 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2377
2378 Step = Builder.CreateVectorSplat(VLen, Step);
2379 Value *MulOp = Builder.CreateFMul(InitVec, Step);
2380 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2381}
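// A worked example of the integer path above (values assumed): with VF = 4,
// StartIdx = 0 and Step = 2, InitVec is <0, 1, 2, 3>, so the emitted IR is
// roughly
//   %mul = mul <4 x i32> <i32 0, i32 1, i32 2, i32 3>, (splat of 2)
//   %induction = add <4 x i32> %val, %mul
// i.e. lane L of the result equals Val[L] + L * 2.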
2382
2383void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2384 const InductionDescriptor &II, Value *Step, Value *Start,
2385 Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
2386 IRBuilder<> &Builder = State.Builder;
2387 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2388 "Expected either an induction phi-node or a truncate of it!");
2389
2390 // Construct the initial value of the vector IV in the vector loop preheader
2391 auto CurrIP = Builder.saveIP();
2392 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2393 if (isa<TruncInst>(EntryVal)) {
2394 assert(Start->getType()->isIntegerTy() &&
2395 "Truncation requires an integer type");
2396 auto *TruncType = cast<IntegerType>(EntryVal->getType());
2397 Step = Builder.CreateTrunc(Step, TruncType);
2398 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2399 }
2400
2401 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2402 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2403 Value *SteppedStart = getStepVector(
2404 SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2405
2406 // We create vector phi nodes for both integer and floating-point induction
2407 // variables. Here, we determine the kind of arithmetic we will perform.
2408 Instruction::BinaryOps AddOp;
2409 Instruction::BinaryOps MulOp;
2410 if (Step->getType()->isIntegerTy()) {
2411 AddOp = Instruction::Add;
2412 MulOp = Instruction::Mul;
2413 } else {
2414 AddOp = II.getInductionOpcode();
2415 MulOp = Instruction::FMul;
2416 }
2417
2418 // Multiply the vectorization factor by the step using integer or
2419 // floating-point arithmetic as appropriate.
2420 Type *StepType = Step->getType();
2421 Value *RuntimeVF;
2422 if (Step->getType()->isFloatingPointTy())
2423 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
2424 else
2425 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
2426 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2427
2428 // Create a vector splat to use in the induction update.
2429 //
2430 // FIXME: If the step is non-constant, we create the vector splat with
2431 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2432 // handle a constant vector splat.
2433 Value *SplatVF = isa<Constant>(Mul)
2434 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
2435 : Builder.CreateVectorSplat(State.VF, Mul);
2436 Builder.restoreIP(CurrIP);
2437
2438 // We may need to add the step a number of times, depending on the unroll
2439 // factor. The last of those goes into the PHI.
2440 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2441 &*LoopVectorBody->getFirstInsertionPt());
2442 VecInd->setDebugLoc(EntryVal->getDebugLoc());
2443 Instruction *LastInduction = VecInd;
2444 for (unsigned Part = 0; Part < UF; ++Part) {
2445 State.set(Def, LastInduction, Part);
2446
2447 if (isa<TruncInst>(EntryVal))
2448 addMetadata(LastInduction, EntryVal);
2449
2450 LastInduction = cast<Instruction>(
2451 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2452 LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2453 }
2454
2455 // Move the last step to the end of the latch block. This ensures consistent
2456 // placement of all induction updates.
2457 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2458 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2459 LastInduction->moveBefore(Br);
2460 LastInduction->setName("vec.ind.next");
2461
2462 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2463 VecInd->addIncoming(LastInduction, LoopVectorLatch);
2464}
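// A sketch of the resulting structure, assuming UF = 2 and an integer IV
// (names illustrative): SplatVF holds the splat of VF * Step, and the vector
// body contains roughly
//   %vec.ind      = phi [ %stepped.start, vector.ph ], [ %vec.ind.next, latch ]
//   %step.add     = add %vec.ind, %splat.vf     ; value for part 1
//   %vec.ind.next = add %step.add, %splat.vf    ; moved to the latch
// with part 0 reading %vec.ind directly.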
2465
2466bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2467 return Cost->isScalarAfterVectorization(I, VF) ||
2468 Cost->isProfitableToScalarize(I, VF);
2469}
2470
2471bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2472 if (shouldScalarizeInstruction(IV))
2473 return true;
2474 auto isScalarInst = [&](User *U) -> bool {
2475 auto *I = cast<Instruction>(U);
2476 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2477 };
2478 return llvm::any_of(IV->users(), isScalarInst);
2479}
2480
2481/// Returns true if \p ID starts at 0 and has a step of 1.
2482static bool isCanonicalID(const InductionDescriptor &ID) {
2483 if (!ID.getConstIntStepValue() || !ID.getConstIntStepValue()->isOne())
2484 return false;
2485 auto *StartC = dyn_cast<ConstantInt>(ID.getStartValue());
2486 return StartC && StartC->isZero();
2487}
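// For example, the IV of `for (int i = 0; i < n; ++i)` is canonical (start 0,
// constant step 1), while the IV of `for (int i = 1; i < n; i += 2)` is not.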
2488
2489void InnerLoopVectorizer::widenIntOrFpInduction(
2490 PHINode *IV, const InductionDescriptor &ID, Value *Start, TruncInst *Trunc,
2491 VPValue *Def, VPTransformState &State, Value *CanonicalIV) {
2492 IRBuilder<> &Builder = State.Builder;
2493 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2494 assert(!State.VF.isZero() && "VF must be non-zero");
2495
2496 // The value from the original loop to which we are mapping the new induction
2497 // variable.
2498 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2499
2500 auto &DL = EntryVal->getModule()->getDataLayout();
2501
2502 // Generate code for the induction step. Note that induction steps are
2503 // required to be loop-invariant.
2504 auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2505 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2506 "Induction step should be loop invariant");
2507 if (PSE.getSE()->isSCEVable(IV->getType())) {
2508 SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2509 return Exp.expandCodeFor(Step, Step->getType(),
2510 State.CFG.VectorPreHeader->getTerminator());
2511 }
2512 return cast<SCEVUnknown>(Step)->getValue();
2513 };
2514
2515 // The scalar value to broadcast. This is derived from the canonical
2516 // induction variable. If a truncation type is given, truncate the canonical
2517 // induction variable and step. Otherwise, derive these values from the
2518 // induction descriptor.
2519 auto CreateScalarIV = [&](Value *&Step) -> Value * {
2520 Value *ScalarIV = CanonicalIV;
2521 Type *NeededType = IV->getType();
2522 if (!isCanonicalID(ID) || ScalarIV->getType() != NeededType) {
2523 ScalarIV =
2524 NeededType->isIntegerTy()
2525 ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2526 : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2527 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2528 State.CFG.PrevBB);
2529 ScalarIV->setName("offset.idx");
2530 }
2531 if (Trunc) {
2532 auto *TruncType = cast<IntegerType>(Trunc->getType());
2533 assert(Step->getType()->isIntegerTy() &&
2534 "Truncation requires an integer step");
2535 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2536 Step = Builder.CreateTrunc(Step, TruncType);
2537 }
2538 return ScalarIV;
2539 };
2540
2541 // Create the vector values from the scalar IV, for use when no vector IV
2542 // is created.
2543 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2544 Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2545 for (unsigned Part = 0; Part < UF; ++Part) {
2546 Value *StartIdx;
2547 if (Step->getType()->isFloatingPointTy())
2548 StartIdx =
2549 getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
2550 else
2551 StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
2552
2553 Value *EntryPart =
2554 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
2555 State.VF, State.Builder);
2556 State.set(Def, EntryPart, Part);
2557 if (Trunc)
2558 addMetadata(EntryPart, Trunc);
2559 }
2560 };
2561
2562 // Fast-math-flags propagate from the original induction instruction.
2563 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2564 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2565 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2566
2567 // Now do the actual transformations, and start with creating the step value.
2568 Value *Step = CreateStepValue(ID.getStep());
2569 if (State.VF.isScalar()) {
2570 Value *ScalarIV = CreateScalarIV(Step);
2571 Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
2572 Step->getType()->getScalarSizeInBits());
2573
2574 Instruction::BinaryOps IncOp = ID.getInductionOpcode();
2575 if (IncOp == Instruction::BinaryOpsEnd)
2576 IncOp = Instruction::Add;
2577 for (unsigned Part = 0; Part < UF; ++Part) {
2578 Value *StartIdx = ConstantInt::get(ScalarTy, Part);
2579 Instruction::BinaryOps MulOp = Instruction::Mul;
2580 if (Step->getType()->isFloatingPointTy()) {
2581 StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
2582 MulOp = Instruction::FMul;
2583 }
2584
2585 Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2586 Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction");
2587 State.set(Def, EntryPart, Part);
2588 if (Trunc) {
2589 assert(!Step->getType()->isFloatingPointTy() &&
2590 "fp inductions shouldn't be truncated");
2591 addMetadata(EntryPart, Trunc);
2592 }
2593 }
2594 return;
2595 }
2596
2597 // Determine if we want a scalar version of the induction variable. This is
2598 // true if the induction variable itself is not widened, or if it has at
2599 // least one user in the loop that is not widened.
2600 auto NeedsScalarIV = needsScalarInduction(EntryVal);
2601 if (!NeedsScalarIV) {
2602 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2603 return;
2604 }
2605
2606 // Try to create a new independent vector induction variable. If we can't
2607 // create the phi node, we will splat the scalar induction variable in each
2608 // loop iteration.
2609 if (!shouldScalarizeInstruction(EntryVal)) {
2610 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2611 Value *ScalarIV = CreateScalarIV(Step);
2612 // Create scalar steps that can be used by instructions we will later
2613 // scalarize. Note that the addition of the scalar steps will not increase
2614 // the number of instructions in the loop in the common case prior to
2615 // InstCombine. We will be trading one vector extract for each scalar step.
2616 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2617 return;
2618 }
2619
2620 // All IV users are scalar instructions, so only emit a scalar IV, not a
2621 // vectorized IV. The exception is when we tail-fold: then the splat IV feeds
2622 // the predicate used by the masked loads/stores.
2623 Value *ScalarIV = CreateScalarIV(Step);
2624 if (!Cost->isScalarEpilogueAllowed())
2625 CreateSplatIV(ScalarIV, Step);
2626 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2627}
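// To summarize the cases above (paraphrase, not part of the source): a scalar
// VF emits per-part scalar inductions only; a vector VF with no scalar users
// emits just a vector IV phi; a widened IV with some scalar users gets both a
// vector IV and scalar steps; and an IV whose users are all scalar gets scalar
// steps only, plus a splat IV when the tail is folded by masking.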
2628
2629void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2630 Instruction *EntryVal,
2631 const InductionDescriptor &ID,
2632 VPValue *Def,
2633 VPTransformState &State) {
2634 IRBuilder<> &Builder = State.Builder;
2635 // We shouldn't have to build scalar steps if we aren't vectorizing.
2636 assert(State.VF.isVector() && "VF should be greater than one");
2637 // Get the scalar value type and ensure it and the step have the same type.
2638 Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2639 assert(ScalarIVTy == Step->getType() &&
2640 "Val and Step should have the same type");
2641
2642 // We build scalar steps for both integer and floating-point induction
2643 // variables. Here, we determine the kind of arithmetic we will perform.
2644 Instruction::BinaryOps AddOp;
2645 Instruction::BinaryOps MulOp;
2646 if (ScalarIVTy->isIntegerTy()) {
2647 AddOp = Instruction::Add;
2648 MulOp = Instruction::Mul;
2649 } else {
2650 AddOp = ID.getInductionOpcode();
2651 MulOp = Instruction::FMul;
2652 }
2653
2654 // Determine the number of scalars we need to generate for each unroll
2655 // iteration. If EntryVal is uniform, we only need to generate the first
2656 // lane. Otherwise, we generate all VF values.
2657 bool IsUniform =
2658 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
2659 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
2660 // Compute the scalar steps and save the results in State.
2661 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2662 ScalarIVTy->getScalarSizeInBits());
2663 Type *VecIVTy = nullptr;
2664 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2665 if (!IsUniform && State.VF.isScalable()) {
2666 VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2667 UnitStepVec =
2668 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2669 SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2670 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2671 }
2672
2673 for (unsigned Part = 0; Part < State.UF; ++Part) {
2674 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2675
2676 if (!IsUniform && State.VF.isScalable()) {
2677 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2678 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2679 if (ScalarIVTy->isFloatingPointTy())
2680 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2681 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2682 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2683 State.set(Def, Add, Part);
2684 // It's also useful to record the per-lane values for the known minimum
2685 // number of elements, so we do that below. This improves code quality
2686 // when, for example, extracting the first element.
2687 }
2688
2689 if (ScalarIVTy->isFloatingPointTy())
2690 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2691
2692 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2693 Value *StartIdx = Builder.CreateBinOp(
2694 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2695 // The step returned by `createStepForVF` is a runtime-evaluated value
2696 // when VF is scalable. Otherwise, it should be folded into a Constant.
2697 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2698 "Expected StartIdx to be folded to a constant when VF is not "
2699 "scalable");
2700 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2701 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2702 State.set(Def, Add, VPIteration(Part, Lane));
2703 }
2704 }
2705}
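// An illustrative expansion (values assumed): for a fixed VF = 4, UF = 1, an
// integer %iv and Step = 1, the nested loop above emits one value per lane,
// roughly
//   %s0 = add i64 %iv, 0
//   %s1 = add i64 %iv, 1
//   %s2 = add i64 %iv, 2
//   %s3 = add i64 %iv, 3
// each recorded via State.set(Def, ..., VPIteration(0, Lane)).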
2706
2707void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2708 const VPIteration &Instance,
2709 VPTransformState &State) {
2710 Value *ScalarInst = State.get(Def, Instance);
2711 Value *VectorValue = State.get(Def, Instance.Part);
2712 VectorValue = Builder.CreateInsertElement(
2713 VectorValue, ScalarInst,
2714 Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2715 State.set(Def, VectorValue, Instance.Part);
2716}
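// In other words (types illustrative), the scalar value for (Part, Lane) is
// inserted into the cached vector value for Part:
//   %vec.new = insertelement <VF x T> %vec, T %scalar, <lane index>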
2717
2718// Return whether we allow using masked interleave-groups (for dealing with
2719// strided loads/stores that reside in predicated blocks, or for dealing
2720// with gaps).
2721static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2722 // If an override option has been passed in for interleaved accesses, use it.
2723 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2724 return EnableMaskedInterleavedMemAccesses;
2725
2726 return TTI.enableMaskedInterleavedAccessVectorization();
2727}
2728
2729// Try to vectorize the interleave group that \p Instr belongs to.
2730//
2731// E.g. Translate following interleaved load group (factor = 3):
2732// for (i = 0; i < N; i+=3) {
2733// R = Pic[i]; // Member of index 0
2734// G = Pic[i+1]; // Member of index 1
2735// B = Pic[i+2]; // Member of index 2
2736// ... // do something to R, G, B
2737// }
2738// To:
2739// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2740// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
2741// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
2742// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
2743//
2744// Or translate following interleaved store group (factor = 3):
2745// for (i = 0; i < N; i+=3) {
2746// ... do something to R, G, B
2747// Pic[i] = R; // Member of index 0
2748// Pic[i+1] = G; // Member of index 1
2749// Pic[i+2] = B; // Member of index 2
2750// }
2751// To:
2752// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2753// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2754// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2755// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2756// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2757void InnerLoopVectorizer::vectorizeInterleaveGroup(
2758 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2759 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2760 VPValue *BlockInMask) {
2761 Instruction *Instr = Group->getInsertPos();
2762 const DataLayout &DL = Instr->getModule()->getDataLayout();
2763
2764 // Prepare for the vector type of the interleaved load/store.
2765 Type *ScalarTy = getLoadStoreType(Instr);
2766 unsigned InterleaveFactor = Group->getFactor();
2767 assert(!VF.isScalable() && "scalable vectors not yet supported.");
2768 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2769
2770 // Prepare for the new pointers.
2771 SmallVector<Value *, 2> AddrParts;
2772 unsigned Index = Group->getIndex(Instr);
2773
2774 // TODO: extend the masked interleaved-group support to reversed access.
2775 assert((!BlockInMask || !Group->isReverse()) &&
2776 "Reversed masked interleave-group not supported.");
2777
2778 // If the group is reverse, adjust the index to refer to the last vector lane
2779 // instead of the first. We adjust the index from the first vector lane,
2780 // rather than directly getting the pointer for lane VF - 1, because the
2781 // pointer operand of the interleaved access is supposed to be uniform. For
2782 // uniform instructions, we're only required to generate a value for the
2783 // first vector lane in each unroll iteration.
2784 if (Group->isReverse())
2785 Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2786
2787 for (unsigned Part = 0; Part < UF; Part++) {
2788 Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2789 setDebugLocFromInst(AddrPart);
2790
2791 // Note that the current instruction could be at any index in the group.
2792 // We need to adjust the address back to the member of index 0.
2793 //
2794 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2795 // b = A[i]; // Member of index 0
2796 // The current pointer points to A[i+1]; adjust it to A[i].
2797 //
2798 // E.g. A[i+1] = a; // Member of index 1
2799 // A[i] = b; // Member of index 0
2800 // A[i+2] = c; // Member of index 2 (Current instruction)
2801 // The current pointer points to A[i+2]; adjust it to A[i].
2802
2803 bool InBounds = false;
2804 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2805 InBounds = gep->isInBounds();
2806 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2807 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2808
2809 // Cast to the vector pointer type.
2810 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2811 Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2812 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2813 }
2814
2815 setDebugLocFromInst(Instr);
2816 Value *PoisonVec = PoisonValue::get(VecTy);
2817
2818 Value *MaskForGaps = nullptr;
2819 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2820 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2821 assert(MaskForGaps && "Mask for Gaps is required but it is null");
2822 }
2823
2824 // Vectorize the interleaved load group.
2825 if (isa<LoadInst>(Instr)) {
2826 // For each unroll part, create a wide load for the group.
2827 SmallVector<Value *, 2> NewLoads;
2828 for (unsigned Part = 0; Part < UF; Part++) {
2829 Instruction *NewLoad;
2830 if (BlockInMask || MaskForGaps) {
2831 assert(useMaskedInterleavedAccesses(*TTI) &&
2832 "masked interleaved groups are not allowed.");
2833 Value *GroupMask = MaskForGaps;
2834 if (BlockInMask) {
2835 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2836 Value *ShuffledMask = Builder.CreateShuffleVector(
2837 BlockInMaskPart,
2838 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2839 "interleaved.mask");
2840 GroupMask = MaskForGaps
2841 ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2842 MaskForGaps)
2843 : ShuffledMask;
2844 }
2845 NewLoad =
2846 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2847 GroupMask, PoisonVec, "wide.masked.vec");
2848 }
2849 else
2850 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2851 Group->getAlign(), "wide.vec");
2852 Group->addMetadata(NewLoad);
2853 NewLoads.push_back(NewLoad);
2854 }
2855
2856 // For each member in the group, shuffle out the appropriate data from the
2857 // wide loads.
2858 unsigned J = 0;
2859 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2860 Instruction *Member = Group->getMember(I);
2861
2862 // Skip the gaps in the group.
2863 if (!Member)
2864 continue;
2865
2866 auto StrideMask =
2867 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2868 for (unsigned Part = 0; Part < UF; Part++) {
2869 Value *StridedVec = Builder.CreateShuffleVector(
2870 NewLoads[Part], StrideMask, "strided.vec");
2871
2872 // If this member has a different type, cast the result to that type.
2873 if (Member->getType() != ScalarTy) {
2874 assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2875 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2876 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2877 }
2878
2879 if (Group->isReverse())
2880 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2881
2882 State.set(VPDefs[J], StridedVec, Part);
2883 }
2884 ++J;
2885 }
2886 return;
2887 }
2888
2889 // The subvector type for the current instruction.
2890 auto *SubVT = VectorType::get(ScalarTy, VF);
2891
2892 // Vectorize the interleaved store group.
2893 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2894 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2895 "masked interleaved groups are not allowed.");
2896 assert((!MaskForGaps || !VF.isScalable()) &&
2897 "masking gaps for scalable vectors is not yet supported.");
2898 for (unsigned Part = 0; Part < UF; Part++) {
2899 // Collect the stored vector from each member.
2900 SmallVector<Value *, 4> StoredVecs;
2901 for (unsigned i = 0; i < InterleaveFactor; i++) {
2902 assert((Group->getMember(i) || MaskForGaps) &&
2903 "Fail to get a member from an interleaved store group");
2904 Instruction *Member = Group->getMember(i);
2905
2906 // Skip the gaps in the group.
2907 if (!Member) {
2908 Value *Poison = PoisonValue::get(SubVT);
2909 StoredVecs.push_back(Poison);
2910 continue;
2911 }
2912
2913 Value *StoredVec = State.get(StoredValues[i], Part);
2914
2915 if (Group->isReverse())
2916 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2917
2918 // If this member has a different type, cast it to a unified type.
2919
2920 if (StoredVec->getType() != SubVT)
2921 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2922
2923 StoredVecs.push_back(StoredVec);
2924 }
2925
2926 // Concatenate all vectors into a wide vector.
2927 Value *WideVec = concatenateVectors(Builder, StoredVecs);
2928
2929 // Interleave the elements in the wide vector.
2930 Value *IVec = Builder.CreateShuffleVector(
2931 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2932 "interleaved.vec");
2933
2934 Instruction *NewStoreInstr;
2935 if (BlockInMask || MaskForGaps) {
2936 Value *GroupMask = MaskForGaps;
2937 if (BlockInMask) {
2938 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2939 Value *ShuffledMask = Builder.CreateShuffleVector(
2940 BlockInMaskPart,
2941 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2942 "interleaved.mask");
2943 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2944 ShuffledMask, MaskForGaps)
2945 : ShuffledMask;
2946 }
2947 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2948 Group->getAlign(), GroupMask);
2949 } else
2950 NewStoreInstr =
2951 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2952
2953 Group->addMetadata(NewStoreInstr);
2954 }
2955}
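// An example of the masked path above (values assumed): with factor 3 and
// VF = 4, a block mask <m0, m1, m2, m3> is replicated per member to
//   <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>
// and, when the group has gaps, ANDed with MaskForGaps so that missing members
// are never accessed.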
2956
2957void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2958 VPReplicateRecipe *RepRecipe,
2959 const VPIteration &Instance,
2960 bool IfPredicateInstr,
2961 VPTransformState &State) {
2962 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2963
2964 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2965 // the first lane and part.
2966 if (isa<NoAliasScopeDeclInst>(Instr))
2967 if (!Instance.isFirstIteration())
2968 return;
2969
2970 setDebugLocFromInst(Instr);
2971
2972 // Does this instruction return a value?
2973 bool IsVoidRetTy = Instr->getType()->isVoidTy();
2974
2975 Instruction *Cloned = Instr->clone();
2976 if (!IsVoidRetTy)
2977 Cloned->setName(Instr->getName() + ".cloned");
2978
2979 // If the scalarized instruction contributes to the address computation of a
2980 // widened masked load/store which was in a basic block that needed predication
2981 // and is not predicated after vectorization, we can't propagate
2982 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
2983 // instruction could feed a poison value to the base address of the widened
2984 // load/store.
2985 if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2986 Cloned->dropPoisonGeneratingFlags();
2987
2988 State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2989 Builder.GetInsertPoint());
2990 // Replace the operands of the cloned instructions with their scalar
2991 // equivalents in the new loop.
2992 for (auto &I : enumerate(RepRecipe->operands())) {
2993 auto InputInstance = Instance;
2994 VPValue *Operand = I.value();
2995 if (State.Plan->isUniformAfterVectorization(Operand))
2996 InputInstance.Lane = VPLane::getFirstLane();
2997 Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2998 }
2999 addNewMetadata(Cloned, Instr);
3000
3001 // Place the cloned scalar in the new loop.
3002 Builder.Insert(Cloned);
3003
3004 State.set(RepRecipe, Cloned, Instance);
3005
3006 // If we just cloned a new assumption, add it to the assumption cache.
3007 if (auto *II = dyn_cast<AssumeInst>(Cloned))
3008 AC->registerAssumption(II);
3009
3010 // End if-block.
3011 if (IfPredicateInstr)
3012 PredicatedInstructions.push_back(Cloned);
3013}
3014
3015void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
3016 BasicBlock *Header = L->getHeader();
3017 assert(!L->getLoopLatch() && "loop should not have a latch at this point");
3018
3019 IRBuilder<> B(Header->getTerminator());
3020 Instruction *OldInst =
3021 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
3022 setDebugLocFromInst(OldInst, &B);
3023
3024 // Connect the header to the exit and header blocks and replace the old
3025 // terminator.
3026 B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
3027
3028 // Now we have two terminators. Remove the old one from the block.
3029 Header->getTerminator()->eraseFromParent();
3030}
3031
3032Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3033 if (TripCount)
3034 return TripCount;
3035
3036 assert(L && "Create Trip Count for null loop.");
3037 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3038 // Find the loop boundaries.
3039 ScalarEvolution *SE = PSE.getSE();
3040 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3041 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3042 "Invalid loop count");
3043
3044 Type *IdxTy = Legal->getWidestInductionType();
3045 assert(IdxTy && "No type for induction");
3046
3047 // The exit count might have type i64 while the phi is i32. This can happen
3048 // if we have an induction variable that is sign extended before the compare.
3049 // The only way we can get a backedge-taken count in that case is if the
3050 // induction variable was signed, and as such will not overflow; truncation
3051 // is then legal.
3052 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3053 IdxTy->getPrimitiveSizeInBits())
3054 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3055 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3056
3057 // Get the total trip count from the count by adding 1.
3058 const SCEV *ExitCount = SE->getAddExpr(
3059 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3060
3061 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3062
3063 // Expand the trip count and place the new instructions in the preheader.
3064 // Note that the preheader does not change; only the loop body does.
3065 SCEVExpander Exp(*SE, DL, "induction");
3066
3067 // Count holds the overall loop count (N).
3068 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3069 L->getLoopPreheader()->getTerminator());
3070
3071 if (TripCount->getType()->isPointerTy())
3072 TripCount =
3073 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3074 L->getLoopPreheader()->getTerminator());
3075
3076 return TripCount;
3077}
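// A worked example (numbers assumed): for `for (i = 0; i < n; ++i)` the
// backedge-taken count is n - 1, so the expanded trip count is (n - 1) + 1,
// i.e. n. The + 1 may wrap to zero, which the minimum-iteration check emitted
// later guards against.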
3078
3079Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3080 if (VectorTripCount)
3081 return VectorTripCount;
3082
3083 Value *TC = getOrCreateTripCount(L);
3084 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3085
3086 Type *Ty = TC->getType();
3087 // This is where we can make the step a runtime constant.
3088 Value *Step = createStepForVF(Builder, Ty, VF, UF);
3089
3090 // If the tail is to be folded by masking, round the number of iterations N
3091 // up to a multiple of Step instead of rounding down. This is done by first
3092 // adding Step-1 and then rounding down. Note that it's ok if this addition
3093 // overflows: the vector induction variable will eventually wrap to zero given
3094 // that it starts at zero and its Step is a power of two; the loop will then
3095 // exit, with the last early-exit vector comparison also producing all-true.
3096 if (Cost->foldTailByMasking()) {
3097 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3098 "VF*UF must be a power of 2 when folding tail by masking");
3099 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
3100 TC = Builder.CreateAdd(
3101 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
3102 }
3103
3104 // Now we need to generate the expression for the part of the loop that the
3105 // vectorized body will execute. This is equal to N - (N % Step) if scalar
3106 // iterations are not required for correctness, or N - Step, otherwise. Step
3107 // is equal to the vectorization factor (number of SIMD elements) times the
3108 // unroll factor (number of SIMD instructions).
3109 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3110
3111 // There are cases where we *must* run at least one iteration in the remainder
3112 // loop. See the cost model for when this can happen. If the step evenly
3113 // divides the trip count, we set the remainder to be equal to the step. If
3114 // the step does not evenly divide the trip count, no adjustment is necessary
3115 // since there will already be scalar iterations. Note that the minimum
3116 // iterations check ensures that N >= Step.
3117 if (Cost->requiresScalarEpilogue(VF)) {
3118 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3119 R = Builder.CreateSelect(IsZero, Step, R);
3120 }
3121
3122 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3123
3124 return VectorTripCount;
3125}
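// A worked example (numbers assumed): with TC = 21 and Step = VF * UF = 8,
// R = 21 % 8 = 5 and n.vec = 16, i.e. two vector iterations plus a
// 5-iteration scalar remainder. If a scalar epilogue is required and TC = 24,
// R = 0 is bumped to 8, so n.vec = 16 and the epilogue still runs 8 iterations.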
3126
3127Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3128 const DataLayout &DL) {
3129 // Verify that V is a vector type with the same number of elements as DstVTy.
3130 auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3131 unsigned VF = DstFVTy->getNumElements();
3132 auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3133 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3134 Type *SrcElemTy = SrcVecTy->getElementType();
3135 Type *DstElemTy = DstFVTy->getElementType();
3136 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3137 "Vector elements must have same size");
3138
3139 // Do a direct cast if element types are castable.
3140 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3141 return Builder.CreateBitOrPointerCast(V, DstFVTy);
3142 }
3143 // V cannot be directly cast to the desired vector type. This may happen
3144 // when V is a floating point vector but DstVTy is a vector of pointers, or
3145 // vice versa. Handle this with a two-step bitcast through an intermediate
3146 // integer type, i.e. Ptr <-> Int <-> Float.
3147 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3148 "Only one type should be a pointer type");
3149 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3150 "Only one type should be a floating point type");
3151 Type *IntTy =
3152 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3153 auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3154 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3155 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3156}
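// A sketch of the two-step case (types assumed): casting <4 x float> to
// <4 x i8*> on a target with 32-bit pointers becomes
//   %as.int = bitcast <4 x float> %v to <4 x i32>
//   %as.ptr = inttoptr <4 x i32> %as.int to <4 x i8*>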
3157
3158void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3159 BasicBlock *Bypass) {
3160 Value *Count = getOrCreateTripCount(L);
3161 // Reuse the existing vector loop preheader for the TC checks.
3162 // Note that a new preheader block is generated for the vector loop.
3163 BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3164 IRBuilder<> Builder(TCCheckBlock->getTerminator());
3165
3166 // Generate code to check if the loop's trip count is less than VF * UF, or
3167 // equal to it in case a scalar epilogue is required; this implies that the
3168 // vector trip count is zero. This check also covers the case where adding one
3169 // to the backedge-taken count overflowed leading to an incorrect trip count
3170 // of zero. In this case we will also jump to the scalar loop.
3171 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3172 : ICmpInst::ICMP_ULT;
3173
3174 // If the tail is to be folded, the vector loop takes care of all iterations.
3175 Value *CheckMinIters = Builder.getFalse();
3176 if (!Cost->foldTailByMasking()) {
3177 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3178 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3179 }
3180 // Create a new preheader for the vector loop.
3181 LoopVectorPreHeader =
3182 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3183 "vector.ph");
3184
3185 assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3186 DT->getNode(Bypass)->getIDom()) &&
3187 "TC check is expected to dominate Bypass");
3188
3189 // Update dominator for Bypass & LoopExit (if needed).
3190 DT->changeImmediateDominator(Bypass, TCCheckBlock);
3191 if (!Cost->requiresScalarEpilogue(VF))
3192 // If there is an epilogue which must run, there's no edge from the
3193 // middle block to exit blocks and thus no need to update the immediate
3194 // dominator of the exit blocks.
3195 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3196
3197 ReplaceInstWithInst(
3198 TCCheckBlock->getTerminator(),
3199 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3200 LoopBypassBlocks.push_back(TCCheckBlock);
3201}
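// Illustrative sketch, not part of the original source: with a trip count
// %n, VF = 4 and UF = 2, and no required scalar epilogue, the check built
// above amounts to the following shorthand IR:
//
//   %min.iters.check = icmp ult i64 %n, 8     ; 8 = VF * UF
//   br i1 %min.iters.check, label %scalar.ph, label %vector.ph
//
// When a scalar epilogue is required the predicate is ule instead, so at
// least one iteration is always left for the scalar loop.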
3202
3203BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3204
3205 BasicBlock *const SCEVCheckBlock =
3206 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3207 if (!SCEVCheckBlock)
3208 return nullptr;
3209
3210 assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3211 (OptForSizeBasedOnProfile &&
3212 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3213 "Cannot SCEV check stride or overflow when optimizing for size");
3214
3215
3216 // Update the dominator only if this is the first RT check.
3217 if (LoopBypassBlocks.empty()) {
3218 DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3219 if (!Cost->requiresScalarEpilogue(VF))
3220 // If there is an epilogue which must run, there's no edge from the
3221 // middle block to exit blocks and thus no need to update the immediate
3222 // dominator of the exit blocks.
3223 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3224 }
3225
3226 LoopBypassBlocks.push_back(SCEVCheckBlock);
3227 AddedSafetyChecks = true;
3228 return SCEVCheckBlock;
3229}
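// Illustrative sketch (the exact IR shape is an assumption): a typical
// SCEV predicate check versions the loop on an assumption made during
// analysis, e.g. that a symbolic stride is one:
//
//   vector.scevcheck:
//     %ident.check = icmp ne i64 %stride, 1
//     br i1 %ident.check, label %scalar.ph, label %vector.ph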
3230
3231BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3232 BasicBlock *Bypass) {
3233 // VPlan-native path does not do any analysis for runtime checks currently.
3234 if (EnableVPlanNativePath)
3235 return nullptr;
3236
3237 BasicBlock *const MemCheckBlock =
3238 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3239
3240 // Check if we generated code that checks at runtime whether arrays overlap.
3241 // We put the checks into a separate block to make the more common case of
3242 // few elements faster.
3243 if (!MemCheckBlock)
3244 return nullptr;
3245
3246 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3247 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3248 "Cannot emit memory checks when optimizing for size, unless forced "
3249 "to vectorize.");
3250 ORE->emit([&]() {
3251 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3252 L->getStartLoc(), L->getHeader())
3253 << "Code-size may be reduced by not forcing "
3254 "vectorization, or by source-code modifications "
3255 "eliminating the need for runtime checks "
3256 "(e.g., adding 'restrict').";
3257 });
3258 }
3259
3260 LoopBypassBlocks.push_back(MemCheckBlock);
3261
3262 AddedSafetyChecks = true;
3263
3264 // We currently don't use LoopVersioning for the actual loop cloning but we
3265 // still use it to add the noalias metadata.
3266 LVer = std::make_unique<LoopVersioning>(
3267 *Legal->getLAI(),
3268 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3269 DT, PSE.getSE());
3270 LVer->prepareNoAliasMetadata();
3271 return MemCheckBlock;
3272}
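// Illustrative sketch (pointer names and IR shape are assumptions): for two
// accessed ranges [%a.start, %a.end) and [%b.start, %b.end), the emitted
// overlap check is conceptually a pair of bound comparisons that bypass the
// vector loop when the ranges may alias:
//
//   vector.memcheck:
//     %bound0 = icmp ult i8* %a.start, %b.end
//     %bound1 = icmp ult i8* %b.start, %a.end
//     %conflict = and i1 %bound0, %bound1
//     br i1 %conflict, label %scalar.ph, label %vector.ph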
3273
3274Value *InnerLoopVectorizer::emitTransformedIndex(
3275 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3276 const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
3277
3278 SCEVExpander Exp(*SE, DL, "induction");
3279 auto Step = ID.getStep();
3280 auto StartValue = ID.getStartValue();
3281 assert(Index->getType()->getScalarType() == Step->getType() &&
3282 "Index scalar type does not match StepValue type");
3283
3284 // Note: the IR at this point is broken. We cannot use SE to create any new
3285 // SCEV and then expand it, hoping that SCEV's simplification will give us
3286 // more optimal code. Unfortunately, attempting to do so on invalid IR may
3287 // lead to various SCEV crashes. So all we can do is use the builder and rely
3288 // on InstCombine for future simplifications. Here we handle only some
3289 // trivial cases.
3290 auto CreateAdd = [&B](Value *X, Value *Y) {
3291 assert(X->getType() == Y->getType() && "Types don't match!");
3292 if (auto *CX = dyn_cast<ConstantInt>(X))
3293 if (CX->isZero())
3294 return Y;
3295 if (auto *CY = dyn_cast<ConstantInt>(Y))
3296 if (CY->isZero())
3297 return X;
3298 return B.CreateAdd(X, Y);
3299 };
3300
3301 // We allow X to be a vector type, in which case Y will potentially be
3302 // splatted into a vector with the same element count.
3303 auto CreateMul = [&B](Value *X, Value *Y) {
3304 assert(X->getType()->getScalarType() == Y->getType() &&
3305 "Types don't match!");
3306 if (auto *CX = dyn_cast<ConstantInt>(X))
3307 if (CX->isOne())
3308 return Y;
3309 if (auto *CY = dyn_cast<ConstantInt>(Y))
3310 if (CY->isOne())
3311 return X;
3312 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3313 if (XVTy && !isa<VectorType>(Y->getType()))
3314 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3315 return B.CreateMul(X, Y);
3316 };
3317
3318 // Get a suitable insert point for SCEV expansion. For blocks in the vector
3319 // loop, choose the end of the vector loop header (=VectorHeader), because
3320 // the DomTree is not kept up-to-date for additional blocks generated in the
3321 // vector loop. By using the header as insertion point, we guarantee that the
3322 // expanded instructions dominate all their uses.
3323 auto GetInsertPoint = [this, &B, VectorHeader]() {
3324 BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3325 if (InsertBB != LoopVectorBody &&
3326 LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
3327 return VectorHeader->getTerminator();
3328 return &*B.GetInsertPoint();
3329 };
3330
3331 switch (ID.getKind()) {
3332 case InductionDescriptor::IK_IntInduction: {
3333 assert(!isa<VectorType>(Index->getType()) &&
3334 "Vector indices not supported for integer inductions yet");
3335 assert(Index->getType() == StartValue->getType() &&
3336 "Index type does not match StartValue type");
3337 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3338 return B.CreateSub(StartValue, Index);
3339 auto *Offset = CreateMul(
3340 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3341 return CreateAdd(StartValue, Offset);
3342 }
3343 case InductionDescriptor::IK_PtrInduction: {
3344 assert(isa<SCEVConstant>(Step) &&
3345 "Expected constant step for pointer induction");
3346 return B.CreateGEP(
3347 ID.getElementType(), StartValue,
3348 CreateMul(Index,
3349 Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3350 GetInsertPoint())));
3351 }
3352 case InductionDescriptor::IK_FpInduction: {
3353 assert(!isa<VectorType>(Index->getType()) &&
3354 "Vector indices not supported for FP inductions yet");
3355 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3356 auto InductionBinOp = ID.getInductionBinOp();
3357 assert(InductionBinOp &&
3358 (InductionBinOp->getOpcode() == Instruction::FAdd ||
3359 InductionBinOp->getOpcode() == Instruction::FSub) &&
3360 "Original bin op should be defined for FP induction");
3361
3362 Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3363 Value *MulExp = B.CreateFMul(StepValue, Index);
3364 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3365 "induction");
3366 }
3367 case InductionDescriptor::IK_NoInduction:
3368 return nullptr;
3369 }
3370 llvm_unreachable("invalid enum")::llvm::llvm_unreachable_internal("invalid enum", "llvm/lib/Transforms/Vectorize/LoopVectorize.cpp"
, 3370)
;
3371}
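// Worked example (concrete values are assumptions): for an integer
// induction with StartValue = 7 and Step = 3, an index i is transformed to
// 7 + i * 3; the CreateAdd/CreateMul helpers above fold away the i == 0 and
// Step == 1 cases rather than emit dead arithmetic. For a pointer induction
// the same product becomes a GEP offset, roughly:
//
//   %offset = mul i64 %i, 3
//   %transformed = getelementptr i32, i32* %start, i64 %offset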
3372
3373Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3374 LoopScalarBody = OrigLoop->getHeader();
3375 LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3376 assert(LoopVectorPreHeader && "Invalid loop structure");
3377 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3378 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3379 "multiple exit loop without required epilogue?");
3380
3381 LoopMiddleBlock =
3382 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3383 LI, nullptr, Twine(Prefix) + "middle.block");
3384 LoopScalarPreHeader =
3385 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3386 nullptr, Twine(Prefix) + "scalar.ph");
3387
3388 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3389
3390 // Set up the middle block terminator. Two cases:
3391 // 1) If we know that we must execute the scalar epilogue, emit an
3392 // unconditional branch.
3393 // 2) Otherwise, we must have a single unique exit block (due to how we
3394 // implement the multiple exit case). In this case, set up a conditional
3395 // branch from the middle block to the loop scalar preheader, and the
3396 // exit block. completeLoopSkeleton will update the condition to use an
3397 // iteration check, if required to decide whether to execute the remainder.
3398 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3399 BranchInst::Create(LoopScalarPreHeader) :
3400 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3401 Builder.getTrue());
3402 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3403 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3404
3405 // We intentionally don't let SplitBlock update LoopInfo since
3406 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
3407 // LoopVectorBody is explicitly added to the correct place a few lines later.
3408 LoopVectorBody =
3409 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3410 nullptr, nullptr, Twine(Prefix) + "vector.body");
3411
3412 // Update dominator for loop exit.
3413 if (!Cost->requiresScalarEpilogue(VF))
3414 // If there is an epilogue which must run, there's no edge from the
3415 // middle block to exit blocks and thus no need to update the immediate
3416 // dominator of the exit blocks.
3417 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3418
3419 // Create and register the new vector loop.
3420 Loop *Lp = LI->AllocateLoop();
3421 Loop *ParentLoop = OrigLoop->getParentLoop();
3422
3423 // Insert the new loop into the loop nest and register the new basic blocks
3424 // before calling any utilities such as SCEV that require valid LoopInfo.
3425 if (ParentLoop) {
3426 ParentLoop->addChildLoop(Lp);
3427 } else {
3428 LI->addTopLevelLoop(Lp);
3429 }
3430 Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3431 return Lp;
3432}
3433
3434void InnerLoopVectorizer::createInductionResumeValues(
3435 Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
3436 assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3437 (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3438 "Inconsistent information about additional bypass.");
3439
3440 Value *VectorTripCount = getOrCreateVectorTripCount(L);
3441 assert(VectorTripCount && L && "Expected valid arguments");
3442 // We are going to resume the execution of the scalar loop.
3443 // Go over all of the induction variables that we found and fix the
3444 // PHIs that are left in the scalar version of the loop.
3445 // The starting values of PHI nodes depend on the counter of the last
3446 // iteration in the vectorized loop.
3447 // If we come from a bypass edge then we need to start from the original
3448 // start value.
3449 Instruction *OldInduction = Legal->getPrimaryInduction();
3450 for (auto &InductionEntry : Legal->getInductionVars()) {
3451 PHINode *OrigPhi = InductionEntry.first;
3452 InductionDescriptor II = InductionEntry.second;
3453
3454 // Create phi nodes to merge from the backedge-taken check block.
3455 PHINode *BCResumeVal =
3456 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3457 LoopScalarPreHeader->getTerminator());
3458 // Copy original phi DL over to the new one.
3459 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3460 Value *&EndValue = IVEndValues[OrigPhi];
3461 Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3462 if (OrigPhi == OldInduction) {
3463 // We know what the end value is.
3464 EndValue = VectorTripCount;
3465 } else {
3466 IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3467
3468 // Fast-math-flags propagate from the original induction instruction.
3469 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3470 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3471
3472 Type *StepType = II.getStep()->getType();
3473 Instruction::CastOps CastOp =
3474 CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3475 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3476 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3477 EndValue =
3478 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3479 EndValue->setName("ind.end");
3480
3481 // Compute the end value for the additional bypass (if applicable).
3482 if (AdditionalBypass.first) {
3483 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3484 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3485 StepType, true);
3486 CRD =
3487 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3488 EndValueFromAdditionalBypass =
3489 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
3490 EndValueFromAdditionalBypass->setName("ind.end");
3491 }
3492 }
3493 // The new PHI merges the original incoming value, in case of a bypass,
3494 // or the value at the end of the vectorized loop.
3495 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3496
3497 // Fix the scalar body counter (PHI node).
3498 // The old induction's phi node in the scalar body needs the truncated
3499 // value.
3500 for (BasicBlock *BB : LoopBypassBlocks)
3501 BCResumeVal->addIncoming(II.getStartValue(), BB);
3502
3503 if (AdditionalBypass.first)
3504 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3505 EndValueFromAdditionalBypass);
3506
3507 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3508 }
3509}
3510
3511BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3512 MDNode *OrigLoopID) {
3513 assert(L && "Expected valid loop.")(static_cast <bool> (L && "Expected valid loop."
) ? void (0) : __assert_fail ("L && \"Expected valid loop.\""
, "llvm/lib/Transforms/Vectorize/LoopVectorize.cpp", 3513, __extension__
__PRETTY_FUNCTION__))
;
3514
3515 // The trip counts should be cached by now.
3516 Value *Count = getOrCreateTripCount(L);
3517 Value *VectorTripCount = getOrCreateVectorTripCount(L);
3518
3519 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3520
3521 // Add a check in the middle block to see if we have completed
3522 // all of the iterations in the first vector loop. Three cases:
3523 // 1) If we require a scalar epilogue, there is no conditional branch as
3524 // we unconditionally branch to the scalar preheader. Do nothing.
3525 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3526 // Thus if tail is to be folded, we know we don't need to run the
3527 // remainder and we can use the previous value for the condition (true).
3528 // 3) Otherwise, construct a runtime check.
3529 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3530 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3531 Count, VectorTripCount, "cmp.n",
3532 LoopMiddleBlock->getTerminator());
3533
3534 // Here we use the same DebugLoc as the scalar loop latch terminator instead
3535 // of the corresponding compare because they may have ended up with
3536 // different line numbers and we want to avoid awkward line stepping while
3537 // debugging, e.g. if the compare has a line number inside the loop.
3538 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3539 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3540 }
3541
3542 // Get ready to start creating new instructions into the vectorized body.
3543 assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3544 "Inconsistent vector loop preheader");
3545 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3546
3547#ifdef EXPENSIVE_CHECKS
3548 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3549 LI->verify(*DT);
3550#endif
3551
3552 return LoopVectorPreHeader;
3553}
3554
3555std::pair<BasicBlock *, Value *>
3556InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3557 /*
3558 In this function we generate a new loop. The new loop will contain
3559 the vectorized instructions while the old loop will continue to run the
3560 scalar remainder.
3561
3562 [ ] <-- loop iteration number check.
3563 / |
3564 / v
3565 | [ ] <-- vector loop bypass (may consist of multiple blocks).
3566 | / |
3567 | / v
3568 || [ ] <-- vector pre header.
3569 |/ |
3570 | v
3571 | [ ] \
3572 | [ ]_| <-- vector loop.
3573 | |
3574 | v
3575 \ -[ ] <--- middle-block.
3576 \/ |
3577 /\ v
3578 | ->[ ] <--- new preheader.
3579 | |
3580 (opt) v <-- edge from middle to exit iff epilogue is not required.
3581 | [ ] \
3582 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue).
3583 \ |
3584 \ v
3585 >[ ] <-- exit block(s).
3586 ...
3587 */
3588
3589 // Get the metadata of the original loop before it gets modified.
3590 MDNode *OrigLoopID = OrigLoop->getLoopID();
3591
3592 // Workaround! Compute the trip count of the original loop and cache it
3593 // before we start modifying the CFG. This code has a systemic problem
3594 // wherein it tries to run analysis over partially constructed IR; this is
3595 // wrong, and not simply for SCEV. The trip count of the original loop
3596 // simply happens to be prone to hitting this in practice. In theory, we
3597 // can hit the same issue for any SCEV, or ValueTracking query done during
3598 // mutation. See PR49900.
3599 getOrCreateTripCount(OrigLoop);
3600
3601 // Create an empty vector loop, and prepare basic blocks for the runtime
3602 // checks.
3603 Loop *Lp = createVectorLoopSkeleton("");
3604
3605 // Now, compare the new count to zero. If it is zero skip the vector loop and
3606 // jump to the scalar loop. This check also covers the case where the
3607 // backedge-taken count is uint##_max: adding one to it will overflow leading
3608 // to an incorrect trip count of zero. In this (rare) case we will also jump
3609 // to the scalar loop.
3610 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3611
3612 // Generate the code to check any assumptions that we've made for SCEV
3613 // expressions.
3614 emitSCEVChecks(Lp, LoopScalarPreHeader);
3615
3616 // Generate the code that checks at runtime whether arrays overlap. We put
3617 // the checks into a separate block to make the more common case of few
3618 // elements faster.
3619 emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3620
3621 createHeaderBranch(Lp);
3622
3623 // Emit phis for the new starting index of the scalar loop.
3624 createInductionResumeValues(Lp);
3625
3626 return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
3627}
3628
3629// Fix up external users of the induction variable. At this point, we are
3630// in LCSSA form, with all external PHIs that use the IV having one input value,
3631// coming from the remainder loop. We need those PHIs to also have a correct
3632// value for the IV when arriving directly from the middle block.
3633void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3634 const InductionDescriptor &II,
3635 Value *CountRoundDown, Value *EndValue,
3636 BasicBlock *MiddleBlock) {
3637 // There are two kinds of external IV usages - those that use the value
3638 // computed in the last iteration (the PHI) and those that use the penultimate
3639 // value (the value that feeds into the phi from the loop latch).
3640 // We allow both, but they obviously have different values.
3641
3642 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3643
3644 DenseMap<Value *, Value *> MissingVals;
3645
3646 // An external user of the last iteration's value should see the value that
3647 // the remainder loop uses to initialize its own IV.
3648 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3649 for (User *U : PostInc->users()) {
3650 Instruction *UI = cast<Instruction>(U);
3651 if (!OrigLoop->contains(UI)) {
3652 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3653 MissingVals[UI] = EndValue;
3654 }
3655 }
3656
3657 // An external user of the penultimate value needs to see EndValue - Step.
3658 // The simplest way to get this is to recompute it from the constituent SCEVs,
3659 // that is Start + (Step * (CRD - 1)).
3660 for (User *U : OrigPhi->users()) {
3661 auto *UI = cast<Instruction>(U);
3662 if (!OrigLoop->contains(UI)) {
3663 const DataLayout &DL =
3664 OrigLoop->getHeader()->getModule()->getDataLayout();
3665 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3666
3667 IRBuilder<> B(MiddleBlock->getTerminator());
3668
3669 // Fast-math-flags propagate from the original induction instruction.
3670 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3671 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3672
3673 Value *CountMinusOne = B.CreateSub(
3674 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3675 Value *CMO =
3676 !II.getStep()->getType()->isIntegerTy()
3677 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3678 II.getStep()->getType())
3679 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3680 CMO->setName("cast.cmo");
3681 Value *Escape =
3682 emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
3683 Escape->setName("ind.escape");
3684 MissingVals[UI] = Escape;
3685 }
3686 }
3687
3688 for (auto &I : MissingVals) {
3689 PHINode *PHI = cast<PHINode>(I.first);
3690 // One corner case we have to handle is two IVs "chasing" each other,
3691 // that is %IV2 = phi [...], [ %IV1, %latch ]
3692 // In this case, if IV1 has an external use, we need to avoid adding both
3693 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3694 // don't already have an incoming value for the middle block.
3695 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3696 PHI->addIncoming(I.second, MiddleBlock);
3697 }
3698}
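// Illustrative example (the loop is assumed, not taken from this file): for
//   for (i = 0; i < n; ++i) { ... }
// with CountRoundDown = n rounded down to a multiple of VF * UF, an outside
// user of the post-increment value receives EndValue = CountRoundDown,
// while a user of the phi itself receives the penultimate value
// Start + Step * (CountRoundDown - 1), recomputed in the middle block as
// above.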
3699
3700namespace {
3701
3702struct CSEDenseMapInfo {
3703 static bool canHandle(const Instruction *I) {
3704 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3705 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3706 }
3707
3708 static inline Instruction *getEmptyKey() {
3709 return DenseMapInfo<Instruction *>::getEmptyKey();
3710 }
3711
3712 static inline Instruction *getTombstoneKey() {
3713 return DenseMapInfo<Instruction *>::getTombstoneKey();
3714 }
3715
3716 static unsigned getHashValue(const Instruction *I) {
3717 assert(canHandle(I) && "Unknown instruction!");
3718 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3719 I->value_op_end()));
3720 }
3721
3722 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3723 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3724 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3725 return LHS == RHS;
3726 return LHS->isIdenticalTo(RHS);
3727 }
3728};
3729
3730} // end anonymous namespace
3731
3732 /// Perform CSE of induction variable instructions.
3733static void cse(BasicBlock *BB) {
3734 // Perform simple cse.
3735 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3736 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3737 if (!CSEDenseMapInfo::canHandle(&In))
3738 continue;
3739
3740 // Check if we can replace this instruction with any of the
3741 // visited instructions.
3742 if (Instruction *V = CSEMap.lookup(&In)) {
3743 In.replaceAllUsesWith(V);
3744 In.eraseFromParent();
3745 continue;
3746 }
3747
3748 CSEMap[&In] = &In;
3749 }
3750}
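// Illustrative example (assumed IR): vectorized bodies often contain
// byte-identical address computations such as
//   %g1 = getelementptr i32, i32* %p, i64 %idx
//   %g2 = getelementptr i32, i32* %p, i64 %idx
// cse() keeps %g1, rewrites users of %g2 to %g1, and erases %g2. The
// DenseMap info above hashes on the opcode plus the operand values, so
// such duplicates land in the same bucket and isEqual confirms identity.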
3751
3752InstructionCost
3753LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3754 bool &NeedToScalarize) const {
3755 Function *F = CI->getCalledFunction();
3756 Type *ScalarRetTy = CI->getType();
3757 SmallVector<Type *, 4> Tys, ScalarTys;
3758 for (auto &ArgOp : CI->args())
3759 ScalarTys.push_back(ArgOp->getType());
3760
3761 // Estimate cost of scalarized vector call. The source operands are assumed
3762 // to be vectors, so we need to extract individual elements from there,
3763 // execute VF scalar calls, and then gather the result into the vector return
3764 // value.
3765 InstructionCost ScalarCallCost =
3766 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3767 if (VF.isScalar())
3768 return ScalarCallCost;
3769
3770 // Compute corresponding vector type for return value and arguments.
3771 Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3772 for (Type *ScalarTy : ScalarTys)
3773 Tys.push_back(ToVectorTy(ScalarTy, VF));
3774
3775 // Compute costs of unpacking argument values for the scalar calls and
3776 // packing the return values to a vector.
3777 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3778
3779 InstructionCost Cost =
3780 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3781
3782 // If we can't emit a vector call for this function, then the currently found
3783 // cost is the cost we need to return.
3784 NeedToScalarize = true;
3785 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3786 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3787
3788 if (!TLI || CI->isNoBuiltin() || !VecFunc)
3789 return Cost;
3790
3791 // If the corresponding vector cost is cheaper, return its cost.
3792 InstructionCost VectorCallCost =
3793 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3794 if (VectorCallCost < Cost) {
3795 NeedToScalarize = false;
3796 Cost = VectorCallCost;
3797 }
3798 return Cost;
3799}
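// Worked cost example (the numbers are assumptions for exposition): with
// VF = 4, ScalarCallCost = 10 and ScalarizationCost = 6, the scalarized
// estimate is 4 * 10 + 6 = 46. If VFDatabase offers a vector variant whose
// VectorCallCost is, say, 20, the function returns 20 and clears
// NeedToScalarize; otherwise the 46 stands and the call will be scalarized.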
3800
3801static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3802 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3803 return Elt;
3804 return VectorType::get(Elt, VF);
3805}
3806
3807InstructionCost
3808LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3809 ElementCount VF) const {
3810 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3811 assert(ID && "Expected intrinsic call!")(static_cast <bool> (ID && "Expected intrinsic call!"
) ? void (0) : __assert_fail ("ID && \"Expected intrinsic call!\""
, "llvm/lib/Transforms/Vectorize/LoopVectorize.cpp", 3811, __extension__
__PRETTY_FUNCTION__))
;
3812 Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3813 FastMathFlags FMF;
3814 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3815 FMF = FPMO->getFastMathFlags();
3816
3817 SmallVector<const Value *> Arguments(CI->args());
3818 FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3819 SmallVector<Type *> ParamTys;
3820 std::transform(FTy->param_begin(), FTy->param_end(),
3821 std::back_inserter(ParamTys),
3822 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3823
3824 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3825 dyn_cast<IntrinsicInst>(CI));
3826 return TTI.getIntrinsicInstrCost(CostAttrs,
3827 TargetTransformInfo::TCK_RecipThroughput);
3828}
3829
3830static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3831 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3832 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3833 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3834}
3835
3836static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3837 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3838 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3839 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3840}
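// Quick example: smallestIntegerVectorType(<4 x i32>, <4 x i16>) yields
// <4 x i16> and largestIntegerVectorType yields <4 x i32>; both compare
// only element bit widths, and the cast<>s enforce integer vector inputs
// in asserts builds.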
3841
3842void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3843 // For every instruction `I` in MinBWs, truncate the operands, create a
3844 // truncated version of `I` and reextend its result. InstCombine runs
3845 // later and will remove any ext/trunc pairs.
3846 SmallPtrSet<Value *, 4> Erased;
3847 for (const auto &KV : Cost->getMinimalBitwidths()) {
3848 // If the value wasn't vectorized, we must maintain the original scalar
3849 // type. The absence of the value from State indicates that it
3850 // wasn't vectorized.
3851 // FIXME: Should not rely on getVPValue at this point.
3852 VPValue *Def = State.Plan->getVPValue(KV.first, true);
3853 if (!State.hasAnyVectorValue(Def))
3854 continue;
3855 for (unsigned Part = 0; Part < UF; ++Part) {
3856 Value *I = State.get(Def, Part);
3857 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3858 continue;
3859 Type *OriginalTy = I->getType();
3860 Type *ScalarTruncatedTy =
3861 IntegerType::get(OriginalTy->getContext(), KV.second);
3862 auto *TruncatedTy = VectorType::get(
3863 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3864 if (TruncatedTy == OriginalTy)
3865 continue;
3866
3867 IRBuilder<> B(cast<Instruction>(I));
3868 auto ShrinkOperand = [&](Value *V) -> Value * {
3869 if (auto *ZI = dyn_cast<ZExtInst>(V))
3870 if (ZI->getSrcTy() == TruncatedTy)
3871 return ZI->getOperand(0);
3872 return B.CreateZExtOrTrunc(V, TruncatedTy);
3873 };
3874
3875 // The actual instruction modification depends on the instruction type,
3876 // unfortunately.
3877 Value *NewI = nullptr;
3878 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3879 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3880 ShrinkOperand(BO->getOperand(1)));
3881
3882 // Any wrapping introduced by shrinking this operation shouldn't be
3883 // considered undefined behavior. So, we can't unconditionally copy
3884 // arithmetic wrapping flags to NewI.
3885 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3886 } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3887 NewI =
3888 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3889 ShrinkOperand(CI->getOperand(1)));
3890 } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3891 NewI = B.CreateSelect(SI->getCondition(),
3892 ShrinkOperand(SI->getTrueValue()),
3893 ShrinkOperand(SI->getFalseValue()));
3894 } else if (auto *CI = dyn_cast<CastInst>(I)) {
3895 switch (CI->getOpcode()) {
3896 default:
3897 llvm_unreachable("Unhandled cast!")::llvm::llvm_unreachable_internal("Unhandled cast!", "llvm/lib/Transforms/Vectorize/LoopVectorize.cpp"
, 3897)
;
3898 case Instruction::Trunc:
3899 NewI = ShrinkOperand(CI->getOperand(0));
3900 break;
3901 case Instruction::SExt:
3902 NewI = B.CreateSExtOrTrunc(
3903 CI->getOperand(0),
3904 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3905 break;
3906 case Instruction::ZExt:
3907 NewI = B.CreateZExtOrTrunc(
3908 CI->getOperand(0),
3909 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3910 break;
3911 }
3912 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3913 auto Elements0 =
3914 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3915 auto *O0 = B.CreateZExtOrTrunc(
3916 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3917 auto Elements1 =
3918 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3919 auto *O1 = B.CreateZExtOrTrunc(
3920 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3921
3922 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3923 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3924 // Don't do anything with the operands, just extend the result.
3925 continue;
3926 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3927 auto Elements =
3928 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3929 auto *O0 = B.CreateZExtOrTrunc(
3930 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3931 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3932 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3933 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3934 auto Elements =
3935 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3936 auto *O0 = B.CreateZExtOrTrunc(
3937 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3938 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3939 } else {
3940 // If we don't know what to do, be conservative and don't do anything.
3941 continue;
3942 }
3943
3944 // Lastly, extend the result.
3945 NewI->takeName(cast<Instruction>(I));
3946 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3947 I->replaceAllUsesWith(Res);
3948 cast<Instruction>(I)->eraseFromParent();
3949 Erased.insert(I);
3950 State.reset(Def, Res, Part);
3951 }
3952 }
3953
3954 // We'll have created a bunch of ZExts that are now parentless. Clean up.
3955 for (const auto &KV : Cost->getMinimalBitwidths()) {
3956 // If the value wasn't vectorized, we must maintain the original scalar
3957 // type. The absence of the value from State indicates that it
3958 // wasn't vectorized.
3959 // FIXME: Should not rely on getVPValue at this point.
3960 VPValue *Def = State.Plan->getVPValue(KV.first, true);
3961 if (!State.hasAnyVectorValue(Def))
3962 continue;
3963 for (unsigned Part = 0; Part < UF; ++Part) {
3964 Value *I = State.get(Def, Part);
3965 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3966 if (Inst && Inst->use_empty()) {
3967 Value *NewI = Inst->getOperand(0);
3968 Inst->eraseFromParent();
3969 State.reset(Def, NewI, Part);
3970 }
3971 }
3972 }
3973}
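// Illustrative example (assumed IR): if the cost model proved that an i32
// add only ever needs 8 bits, the rewrite above turns
//   %a = add <4 x i32> %x, %y
// into
//   %xt = trunc <4 x i32> %x to <4 x i8>
//   %yt = trunc <4 x i32> %y to <4 x i8>
//   %at = add <4 x i8> %xt, %yt
//   %a  = zext <4 x i8> %at to <4 x i32>
// and leaves InstCombine to shrink the surrounding ext/trunc chains.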
3974
3975void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3976 // Insert truncates and extends for any truncated instructions as hints to
3977 // InstCombine.
3978 if (VF.isVector())
3979 truncateToMinimalBitwidths(State);
3980
3981 // Fix widened non-induction PHIs by setting up the PHI operands.
3982 if (OrigPHIsToFix.size()) {
3983 assert(EnableVPlanNativePath &&
3984 "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3985 fixNonInductionPHIs(State);
3986 }
3987
3988 // At this point every instruction in the original loop is widened to a
3989 // vector form. Now we need to fix the recurrences in the loop. These PHI
3990 // nodes are currently empty because we did not want to introduce cycles.
3991 // This is the second stage of vectorizing recurrences.
3992 fixCrossIterationPHIs(State);
3993
3994 // Forget the original basic block.
3995 PSE.getSE()->forgetLoop(OrigLoop);
3996
3997 // If we inserted an edge from the middle block to the unique exit block,
3998 // update uses outside the loop (phis) to account for the newly inserted
3999 // edge.
4000 if (!Cost->requiresScalarEpilogue(VF)) {
4001 // Fix-up external users of the induction variables.
4002 for (auto &Entry : Legal->getInductionVars())
4003 fixupIVUsers(Entry.first, Entry.second,
4004 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4005 IVEndValues[Entry.first], LoopMiddleBlock);
4006
4007 fixLCSSAPHIs(State);
4008 }
4009
4010 for (Instruction *PI : PredicatedInstructions)
4011 sinkScalarOperands(&*PI);
4012
4013 // Remove redundant induction instructions.
4014 cse(LoopVectorBody);
4015
4016 // Set/update profile weights for the vector and remainder loops as original
4017 // loop iterations are now distributed among them. Note that original loop
4018 // represented by LoopScalarBody becomes remainder loop after vectorization.
4019 //
4020 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
4021 // end up getting slightly roughened result but that should be OK since
4022 // profile is not inherently precise anyway. Note also possible bypass of
4023 // vector code caused by legality checks is ignored, assigning all the weight
4024 // to the vector loop, optimistically.
4025 //
4026 // For scalable vectorization we can't know at compile time how many iterations
4027 // of the loop are handled in one vector iteration, so instead assume a pessimistic
4028 // vscale of '1'.
4029 setProfileInfoAfterUnrolling(
4030 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4031 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4032}
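// Worked example (numbers assumed): if the original loop's profile reports
// 1000 iterations per entry and VF * UF = 8, the vector loop is assigned
// roughly 1000 / 8 = 125 iterations and the scalar remainder the few left
// over. For scalable VF the pessimistic vscale of 1 used above makes this
// an underestimate on wider hardware, which profile data tolerates.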
4033
4034void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4035 // In order to support recurrences we need to be able to vectorize Phi nodes.
4036 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4037 // stage #2: We now need to fix the recurrences by adding incoming edges to
4038 // the currently empty PHI nodes. At this point every instruction in the
4039 // original loop is widened to a vector form so we can use them to construct
4040 // the incoming edges.
4041 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4042 for (VPRecipeBase &R : Header->phis()) {
4043 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4044 fixReduction(ReductionPhi, State);
4045 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4046 fixFirstOrderRecurrence(FOR, State);
4047 }
4048}
4049
4050void InnerLoopVectorizer::fixFirstOrderRecurrence(
4051 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
4052 // This is the second phase of vectorizing first-order recurrences. An
4053 // overview of the transformation is described below. Suppose we have the
4054 // following loop.
4055 //
4056 // for (int i = 0; i < n; ++i)
4057 // b[i] = a[i] - a[i - 1];
4058 //
4059 // There is a first-order recurrence on "a". For this loop, the shorthand
4060 // scalar IR looks like:
4061 //
4062 // scalar.ph:
4063 // s_init = a[-1]
4064 // br scalar.body
4065 //
4066 // scalar.body:
4067 // i = phi [0, scalar.ph], [i+1, scalar.body]
4068 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4069 // s2 = a[i]
4070 // b[i] = s2 - s1
4071 // br cond, scalar.body, ...
4072 //
4073 // In this example, s1 is a recurrence because its value depends on the
4074 // previous iteration. In the first phase of vectorization, we created a
4075 // vector phi v1 for s1. We now complete the vectorization and produce the
4076 // shorthand vector IR shown below (for VF = 4, UF = 1).
4077 //
4078 // vector.ph:
4079 // v_init = vector(..., ..., ..., a[-1])
4080 // br vector.body
4081 //
4082 // vector.body
4083 // i = phi [0, vector.ph], [i+4, vector.body]
4084 // v1 = phi [v_init, vector.ph], [v2, vector.body]
4085 // v2 = a[i, i+1, i+2, i+3];
4086 // v3 = vector(v1(3), v2(0, 1, 2))
4087 // b[i, i+1, i+2, i+3] = v2 - v3
4088 // br cond, vector.body, middle.block
4089 //
4090 // middle.block:
4091 // x = v2(3)
4092 // br scalar.ph
4093 //
4094 // scalar.ph:
4095 // s_init = phi [x, middle.block], [a[-1], otherwise]
4096 // br scalar.body
4097 //
4098 // After execution completes the vector loop, we extract the next value of
4099 // the recurrence (x) to use as the initial value in the scalar loop.
4100
4101 // Extract the last vector element in the middle block. This will be the
4102 // initial value for the recurrence when jumping to the scalar loop.
4103 VPValue *PreviousDef = PhiR->getBackedgeValue();
4104 Value *Incoming = State.get(PreviousDef, UF - 1);
4105 auto *ExtractForScalar = Incoming;
4106 auto *IdxTy = Builder.getInt32Ty();
4107 if (VF.isVector()) {
4108 auto *One = ConstantInt::get(IdxTy, 1);
4109 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4110 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4111 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4112 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4113 "vector.recur.extract");
4114 }
4115 // Extract the second-to-last element in the middle block if the
4116 // Phi is used outside the loop. We need to extract the phi itself
4117 // and not the last element (the phi update in the current iteration). This
4118 // will be the value when jumping to the exit block from the LoopMiddleBlock,
4119 // when the scalar loop is not run at all.
4120 Value *ExtractForPhiUsedOutsideLoop = nullptr;
4121 if (VF.isVector()) {
4122 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4123 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4124 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4125 Incoming, Idx, "vector.recur.extract.for.phi");
4126 } else if (UF > 1)
4127 // When loop is unrolled without vectorizing, initialize
4128 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
4129 // of `Incoming`. This is analogous to the vectorized case above: extracting
4130 // the second-to-last element when VF > 1.
4131 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4132
4133 // Fix the initial value of the original recurrence in the scalar loop.
4134 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4135 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4136 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4137 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4138 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4139 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4140 Start->addIncoming(Incoming, BB);
4141 }
4142
4143 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4144 Phi->setName("scalar.recur");
4145
4146 // Finally, fix users of the recurrence outside the loop. The users will need
4147 // either the last value of the scalar recurrence or the last value of the
4148 // vector recurrence we extracted in the middle block. Since the loop is in
4149 // LCSSA form, we just need to find all the phi nodes for the original scalar
4150 // recurrence in the exit block, and then add an edge for the middle block.
4151 // Note that LCSSA does not imply single entry when the original scalar loop
4152 // had multiple exiting edges (as we always run the last iteration in the
4153 // scalar epilogue); in that case, there is no edge from middle to exit
4154 // and thus no phis which need to be updated.
4155 if (!Cost->requiresScalarEpilogue(VF))
4156 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4157 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4158 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4159}
4160
4161void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4162 VPTransformState &State) {
4163 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4164 // Get its reduction variable descriptor.
4165 assert(Legal->isReductionVariable(OrigPhi) &&
4166        "Unable to find the reduction variable");
4167 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4168
4169 RecurKind RK = RdxDesc.getRecurrenceKind();
4170 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4171 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4172 setDebugLocFromInst(ReductionStartValue);
4173
4174 VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4175 // This is the vector-clone of the value that leaves the loop.
4176 Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4177
4178 // Wrap flags are in general invalid after vectorization, clear them.
4179 clearReductionWrapFlags(RdxDesc, State);
4180
4181 // Before each round, move the insertion point right between
4182 // the PHIs and the values we are going to write.
4183 // This allows us to write both PHINodes and the extractelement
4184 // instructions.
4185 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4186
4187 setDebugLocFromInst(LoopExitInst);
4188
4189 Type *PhiTy = OrigPhi->getType();
4190 // If tail is folded by masking, the vector value to leave the loop should be
4191 // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4192 // instead of the former. For an inloop reduction the reduction will already
4193 // be predicated, and does not need to be handled here.
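// A sketch of the shape being matched here (names illustrative, not from
// this function): the tail-folded loop body already contains
//   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi
// and it is %sel, rather than %rdx.next, that must leave the loop.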
4194 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4195 for (unsigned Part = 0; Part < UF; ++Part) {
4196 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4197 Value *Sel = nullptr;
4198 for (User *U : VecLoopExitInst->users()) {
4199 if (isa<SelectInst>(U)) {
4200 assert(!Sel && "Reduction exit feeding two selects");
4201 Sel = U;
4202 } else
4203 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4204 }
4205 assert(Sel && "Reduction exit feeds no select");
4206 State.reset(LoopExitInstDef, Sel, Part);
4207
4208 // If the target can create a predicated operator for the reduction at no
4209 // extra cost in the loop (for example a predicated vadd), it can be
4210 // cheaper for the select to remain in the loop than be sunk out of it,
4211 // and so use the select value for the phi instead of the old
4212 // LoopExitValue.
4213 if (PreferPredicatedReductionSelect ||
4214 TTI->preferPredicatedReductionSelect(
4215 RdxDesc.getOpcode(), PhiTy,
4216 TargetTransformInfo::ReductionFlags())) {
4217 auto *VecRdxPhi =
4218 cast<PHINode>(State.get(PhiR, Part));
4219 VecRdxPhi->setIncomingValueForBlock(
4220 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4221 }
4222 }
4223 }
4224
4225 // If the vector reduction can be performed in a smaller type, we truncate
4226 // then extend the loop exit value to enable InstCombine to evaluate the
4227 // entire expression in the smaller type.
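// For instance (illustrative), an i8 "sum += a[i]" reduction that the
// frontend promoted to i32 can be narrowed: each part is truncated to
// <VF x i8> and sign/zero-extended back, letting InstCombine shrink the
// whole reduction chain to i8.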
4228 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4229 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4230 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4231 Builder.SetInsertPoint(
4232 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4233 VectorParts RdxParts(UF);
4234 for (unsigned Part = 0; Part < UF; ++Part) {
4235 RdxParts[Part] = State.get(LoopExitInstDef, Part);
4236 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4237 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4238 : Builder.CreateZExt(Trunc, VecTy);
4239 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4240 if (U != Trunc) {
4241 U->replaceUsesOfWith(RdxParts[Part], Extnd);
4242 RdxParts[Part] = Extnd;
4243 }
4244 }
4245 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4246 for (unsigned Part = 0; Part < UF; ++Part) {
4247 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4248 State.reset(LoopExitInstDef, RdxParts[Part], Part);
4249 }
4250 }
4251
4252 // Reduce all of the unrolled parts into a single vector.
4253 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4254 unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4255
4256 // The middle block terminator has already been assigned a DebugLoc here (the
4257 // OrigLoop's single latch terminator). We want the whole middle block to
4258 // appear to execute on this line because: (a) it is all compiler generated,
4259 // (b) these instructions are always executed after evaluating the latch
4260 // conditional branch, and (c) other passes may add new predecessors which
4261 // terminate on this line. This is the easiest way to ensure we don't
4262 // accidentally cause an extra step back into the loop while debugging.
4263 setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4264 if (PhiR->isOrdered())
4265 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4266 else {
4267 // Floating-point operations should have some FMF to enable the reduction.
4268 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4269 Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4270 for (unsigned Part = 1; Part < UF; ++Part) {
4271 Value *RdxPart = State.get(LoopExitInstDef, Part);
4272 if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4273 ReducedPartRdx = Builder.CreateBinOp(
4274 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4275 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4276 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4277 ReducedPartRdx, RdxPart);
4278 else
4279 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4280 }
4281 }
4282
4283 // Create the reduction after the loop. Note that inloop reductions create the
4284 // target reduction in the loop using a Reduction recipe.
4285 if (VF.isVector() && !PhiR->isInLoop()) {
4286 ReducedPartRdx =
4287 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4288 // If the reduction can be performed in a smaller type, we need to extend
4289 // the reduction to the wider type before we branch to the original loop.
4290 if (PhiTy != RdxDesc.getRecurrenceType())
4291 ReducedPartRdx = RdxDesc.isSigned()
4292 ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4293 : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4294 }
4295
4296 // Create a phi node that merges control-flow from the backedge-taken check
4297 // block and the middle block.
4298 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4299 LoopScalarPreHeader->getTerminator());
4300 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4301 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4302 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4303
4304 // Now, we need to fix the users of the reduction variable
4305 // inside and outside of the scalar remainder loop.
4306
4307 // We know that the loop is in LCSSA form. We need to update the PHI nodes
4308 // in the exit blocks. See comment on analogous loop in
4309 // fixFirstOrderRecurrence for a more complete explanation of the logic.
4310 if (!Cost->requiresScalarEpilogue(VF))
4311 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4312 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4313 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4314
4315 // Fix the scalar loop reduction variable with the incoming reduction sum
4316 // from the vector body and from the backedge value.
4317 int IncomingEdgeBlockIdx =
4318 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4319 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4320 // Pick the other block.
4321 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4322 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4323 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4324}
4325
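// Dropping nsw/nuw is needed because vectorization reassociates the
// reduction: e.g. a scalar i32 sum that never overflows may be split into
// UF * VF partial sums whose intermediate values can wrap, so keeping the
// scalar wrap flags would introduce poison.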
4326void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4327 VPTransformState &State) {
4328 RecurKind RK = RdxDesc.getRecurrenceKind();
4329 if (RK != RecurKind::Add && RK != RecurKind::Mul)
4330 return;
4331
4332 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4333 assert(LoopExitInstr && "null loop exit instruction");
4334 SmallVector<Instruction *, 8> Worklist;
4335 SmallPtrSet<Instruction *, 8> Visited;
4336 Worklist.push_back(LoopExitInstr);
4337 Visited.insert(LoopExitInstr);
4338
4339 while (!Worklist.empty()) {
4340 Instruction *Cur = Worklist.pop_back_val();
4341 if (isa<OverflowingBinaryOperator>(Cur))
4342 for (unsigned Part = 0; Part < UF; ++Part) {
4343 // FIXME: Should not rely on getVPValue at this point.
4344 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4345 cast<Instruction>(V)->dropPoisonGeneratingFlags();
4346 }
4347
4348 for (User *U : Cur->users()) {
4349 Instruction *UI = cast<Instruction>(U);
4350 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4351 Visited.insert(UI).second)
4352 Worklist.push_back(UI);
4353 }
4354 }
4355}
4356
4357void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4358 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4359 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4360 // Some phis were already hand updated by the reduction and recurrence
4361 // code above, leave them alone.
4362 continue;
4363
4364 auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4365 // Non-instruction incoming values will have only one value.
4366
4367 VPLane Lane = VPLane::getFirstLane();
4368 if (isa<Instruction>(IncomingValue) &&
4369 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4370 VF))
4371 Lane = VPLane::getLastLaneForVF(VF);
4372
4373 // Can be a loop invariant incoming value or the last scalar value to be
4374 // extracted from the vectorized loop.
4375 // FIXME: Should not rely on getVPValue at this point.
4376 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4377 Value *lastIncomingValue =
4378 OrigLoop->isLoopInvariant(IncomingValue)
4379 ? IncomingValue
4380 : State.get(State.Plan->getVPValue(IncomingValue, true),
4381 VPIteration(UF - 1, Lane));
4382 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4383 }
4384}
4385
4386void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4387 // The basic block and loop containing the predicated instruction.
4388 auto *PredBB = PredInst->getParent();
4389 auto *VectorLoop = LI->getLoopFor(PredBB);
4390
4391 // Initialize a worklist with the operands of the predicated instruction.
4392 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4393
4394 // Holds instructions that we need to analyze again. An instruction may be
4395 // reanalyzed if we don't yet know if we can sink it or not.
4396 SmallVector<Instruction *, 8> InstsToReanalyze;
4397
4398 // Returns true if a given use occurs in the predicated block. Phi nodes use
4399 // their operands in their corresponding predecessor blocks.
4400 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4401 auto *I = cast<Instruction>(U.getUser());
4402 BasicBlock *BB = I->getParent();
4403 if (auto *Phi = dyn_cast<PHINode>(I))
4404 BB = Phi->getIncomingBlock(
4405 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4406 return BB == PredBB;
4407 };
4408
4409 // Iteratively sink the scalarized operands of the predicated instruction
4410 // into the block we created for it. When an instruction is sunk, its
4411 // operands are then added to the worklist. The algorithm ends after one pass
4412 // through the worklist doesn't sink a single instruction.
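// For example (illustrative): if "%d = sdiv %a, %b" was predicated into
// PredBB and "%a = add %x, 1" is used only by %d, the add is sunk next to
// the sdiv; the add's own operands are then reconsidered on the next pass.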
4413 bool Changed;
4414 do {
4415 // Add the instructions that need to be reanalyzed to the worklist, and
4416 // reset the changed indicator.
4417 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4418 InstsToReanalyze.clear();
4419 Changed = false;
4420
4421 while (!Worklist.empty()) {
4422 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4423
4424 // We can't sink an instruction if it is a phi node, is not in the loop,
4425 // or may have side effects.
4426 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4427 I->mayHaveSideEffects())
4428 continue;
4429
4430 // If the instruction is already in PredBB, check if we can sink its
4431 // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4432 // sinking the scalar instruction I, hence it appears in PredBB; but it
4433 // may have failed to sink I's operands (recursively), which we try
4434 // (again) here.
4435 if (I->getParent() == PredBB) {
4436 Worklist.insert(I->op_begin(), I->op_end());
4437 continue;
4438 }
4439
4440 // It's legal to sink the instruction if all its uses occur in the
4441 // predicated block. Otherwise, there's nothing to do yet, and we may
4442 // need to reanalyze the instruction.
4443 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4444 InstsToReanalyze.push_back(I);
4445 continue;
4446 }
4447
4448 // Move the instruction to the beginning of the predicated block, and add
4449 // its operands to the worklist.
4450 I->moveBefore(&*PredBB->getFirstInsertionPt());
4451 Worklist.insert(I->op_begin(), I->op_end());
4452
4453 // The sinking may have enabled other instructions to be sunk, so we will
4454 // need to iterate.
4455 Changed = true;
4456 }
4457 } while (Changed);
4458}
4459
4460void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4461 for (PHINode *OrigPhi : OrigPHIsToFix) {
4462 VPWidenPHIRecipe *VPPhi =
4463 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4464 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4465 // Make sure the builder has a valid insert point.
4466 Builder.SetInsertPoint(NewPhi);
4467 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4468 VPValue *Inc = VPPhi->getIncomingValue(i);
4469 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4470 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4471 }
4472 }
4473}
4474
4475bool InnerLoopVectorizer::useOrderedReductions(
4476 const RecurrenceDescriptor &RdxDesc) {
4477 return Cost->useOrderedReductions(RdxDesc);
4478}
4479
4480void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4481 VPWidenPHIRecipe *PhiR,
4482 VPTransformState &State) {
4483 PHINode *P = cast<PHINode>(PN);
4484 if (EnableVPlanNativePath) {
4485 // Currently we enter here in the VPlan-native path for non-induction
4486 // PHIs where all control flow is uniform. We simply widen these PHIs.
4487 // Create a vector phi with no operands - the vector phi operands will be
4488 // set at the end of vector code generation.
4489 Type *VecTy = (State.VF.isScalar())
4490 ? PN->getType()
4491 : VectorType::get(PN->getType(), State.VF);
4492 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4493 State.set(PhiR, VecPhi, 0);
4494 OrigPHIsToFix.push_back(P);
4495
4496 return;
4497 }
4498
4499 assert(PN->getParent() == OrigLoop->getHeader() &&
4500        "Non-header phis should have been handled elsewhere");
4501
4502 // In order to support recurrences we need to be able to vectorize Phi nodes.
4503 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4504 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4505 // this value when we vectorize all of the instructions that use the PHI.
4506
4507 assert(!Legal->isReductionVariable(P) &&
4508        "reductions should be handled elsewhere");
4509
4510 setDebugLocFromInst(P);
4511
4512 // This PHINode must be an induction variable.
4513 // Make sure that we know about it.
4514 assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4515
4516 InductionDescriptor II = Legal->getInductionVars().lookup(P);
4517 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4518
4519 auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
4520 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
4521
4522 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4523 // which can be found from the original scalar operations.
4524 switch (II.getKind()) {
4525 case InductionDescriptor::IK_NoInduction:
4526 llvm_unreachable("Unknown induction")::llvm::llvm_unreachable_internal("Unknown induction", "llvm/lib/Transforms/Vectorize/LoopVectorize.cpp"
, 4526)
;
4527 case InductionDescriptor::IK_IntInduction:
4528 case InductionDescriptor::IK_FpInduction:
4529 llvm_unreachable("Integer/fp induction is handled elsewhere.")::llvm::llvm_unreachable_internal("Integer/fp induction is handled elsewhere."
, "llvm/lib/Transforms/Vectorize/LoopVectorize.cpp", 4529)
;
4530 case InductionDescriptor::IK_PtrInduction: {
4531 // Handle the pointer induction variable case.
4532 assert(P->getType()->isPointerTy() && "Unexpected type.");
4533
4534 if (Cost->isScalarAfterVectorization(P, State.VF)) {
4535 // This is the normalized GEP that starts counting at zero.
4536 Value *PtrInd =
4537 Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
4538 // Determine the number of scalars we need to generate for each unroll
4539 // iteration. If the instruction is uniform, we only need to generate the
4540 // first lane. Otherwise, we generate all VF values.
4541 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4542 assert((IsUniform || !State.VF.isScalable()) &&
4543        "Cannot scalarize a scalable VF");
4544 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4545
4546 for (unsigned Part = 0; Part < UF; ++Part) {
4547 Value *PartStart =
4548 createStepForVF(Builder, PtrInd->getType(), VF, Part);
4549
4550 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4551 Value *Idx = Builder.CreateAdd(
4552 PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4553 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4554 Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4555 DL, II, State.CFG.PrevBB);
4556 SclrGep->setName("next.gep");
4557 State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4558 }
4559 }
4560 return;
4561 }
4562 assert(isa<SCEVConstant>(II.getStep()) &&
4563        "Induction step not a SCEV constant!");
4564 Type *PhiType = II.getStep()->getType();
4565
4566 // Build a pointer phi
4567 Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4568 Type *ScStValueType = ScalarStartValue->getType();
4569 PHINode *NewPointerPhi =
4570 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4571 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4572
4573 // A pointer induction, performed by using a gep
4574 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4575 Instruction *InductionLoc = LoopLatch->getTerminator();
4576 const SCEV *ScalarStep = II.getStep();
4577 SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4578 Value *ScalarStepValue =
4579 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4580 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4581 Value *NumUnrolledElems =
4582 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4583 Value *InductionGEP = GetElementPtrInst::Create(
4584 II.getElementType(), NewPointerPhi,
4585 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4586 InductionLoc);
4587 NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4588
4589 // Create UF many actual address geps that use the pointer
4590 // phi as base and a vectorized version of the step value
4591 // (<step*0, ..., step*N>) as offset.
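// E.g. for VF = 4, UF = 2 (fixed width, illustrative): part 0 uses offsets
// <0, 1, 2, 3> * Step and part 1 uses offsets <4, 5, 6, 7> * Step relative
// to the pointer phi.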
4592 for (unsigned Part = 0; Part < State.UF; ++Part) {
4593 Type *VecPhiType = VectorType::get(PhiType, State.VF);
4594 Value *StartOffsetScalar =
4595 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4596 Value *StartOffset =
4597 Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4598 // Create a vector of consecutive numbers from zero to VF.
4599 StartOffset =
4600 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4601
4602 Value *GEP = Builder.CreateGEP(
4603 II.getElementType(), NewPointerPhi,
4604 Builder.CreateMul(
4605 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4606 "vector.gep"));
4607 State.set(PhiR, GEP, Part);
4608 }
4609 }
4610 }
4611}
4612
4613/// A helper function for checking whether an integer division-related
4614/// instruction may divide by zero (in which case it must be predicated if
4615/// executed conditionally in the scalar code).
4616/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4617 /// Non-zero divisors that are not compile-time constants will not be
4618/// converted into multiplication, so we will still end up scalarizing
4619/// the division, but can do so w/o predication.
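/// For example (illustrative), in "if (c[i]) b[i] = x / d[i];" the divide
/// must be predicated, since d[i] may be zero in a lane whose condition is
/// false, whereas "x / 7" may be scalarized without predication.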
4620static bool mayDivideByZero(Instruction &I) {
4621 assert((I.getOpcode() == Instruction::UDiv ||
4622         I.getOpcode() == Instruction::SDiv ||
4623         I.getOpcode() == Instruction::URem ||
4624         I.getOpcode() == Instruction::SRem) &&
4625        "Unexpected instruction");
4626 Value *Divisor = I.getOperand(1);
4627 auto *CInt = dyn_cast<ConstantInt>(Divisor);
4628 return !CInt || CInt->isZero();
4629}
4630
4631void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4632 VPUser &ArgOperands,
4633 VPTransformState &State) {
4634 assert(!isa<DbgInfoIntrinsic>(I) &&
4635        "DbgInfoIntrinsic should have been dropped during VPlan construction");
4636 setDebugLocFromInst(&I);
4637
4638 Module *M = I.getParent()->getParent()->getParent();
4639 auto *CI = cast<CallInst>(&I);
4640
4641 SmallVector<Type *, 4> Tys;
4642 for (Value *ArgOperand : CI->args())
4643 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4644
4645 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4646
4647 // The flag shows whether we use an intrinsic or a plain call for the
4648 // vectorized version of the instruction.
4649 // Is it beneficial to perform the intrinsic call rather than the lib call?
4650 bool NeedToScalarize = false;
4651 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4652 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4653 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
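// E.g. a sinf call might be widened either to the llvm.sin intrinsic or to
// a vector math-library routine if the target provides one (mapping
// illustrative); the cheaper of the two costs above decides.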
4654 assert((UseVectorIntrinsic || !NeedToScalarize) &&
4655        "Instruction should be scalarized elsewhere.");
4656 assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4657        "Either the intrinsic cost or vector call cost must be valid");
4658
4659 for (unsigned Part = 0; Part < UF; ++Part) {
4660 SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4661 SmallVector<Value *, 4> Args;
4662 for (auto &I : enumerate(ArgOperands.operands())) {
4663 // Some intrinsics have a scalar argument - don't replace it with a
4664 // vector.
4665 Value *Arg;
4666 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4667 Arg = State.get(I.value(), Part);
4668 else {
4669 Arg = State.get(I.value(), VPIteration(0, 0));
4670 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4671 TysForDecl.push_back(Arg->getType());
4672 }
4673 Args.push_back(Arg);
4674 }
4675
4676 Function *VectorF;
4677 if (UseVectorIntrinsic) {
4678 // Use vector version of the intrinsic.
4679 if (VF.isVector())
4680 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4681 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4682 assert(VectorF && "Can't retrieve vector intrinsic.");
4683 } else {
4684 // Use vector version of the function call.
4685 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4686#ifndef NDEBUG
4687 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4688        "Can't create vector function.");
4689#endif
4690 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4691 }
4692 SmallVector<OperandBundleDef, 1> OpBundles;
4693 CI->getOperandBundlesAsDefs(OpBundles);
4694 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4695
4696 if (isa<FPMathOperator>(V))
4697 V->copyFastMathFlags(CI);
4698
4699 State.set(Def, V, Part);
4700 addMetadata(V, &I);
4701 }
4702}
4703
4704void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4705 // We should not collect Scalars more than once per VF. Right now, this
4706 // function is called from collectUniformsAndScalars(), which already does
4707 // this check. Collecting Scalars for VF=1 does not make any sense.
4708 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4709        "This function should not be visited twice for the same VF");
4710
4711 SmallSetVector<Instruction *, 8> Worklist;
4712
4713 // These sets are used to seed the analysis with pointers used by memory
4714 // accesses that will remain scalar.
4715 SmallSetVector<Instruction *, 8> ScalarPtrs;
4716 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4717 auto *Latch = TheLoop->getLoopLatch();
4718
4719 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4720 // The pointer operands of loads and stores will be scalar as long as the
4721 // memory access is not a gather or scatter operation. The value operand of a
4722 // store will remain scalar if the store is scalarized.
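// E.g. (illustrative) the GEP computing the address of a consecutive load
// "a[i]" is a scalar use, while a GEP feeding a gather such as "a[b[i]]"
// must produce a vector of pointers and is not.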
4723 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4724 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4725 assert(WideningDecision != CM_Unknown &&
4726        "Widening decision should be ready at this moment");
4727 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4728 if (Ptr == Store->getValueOperand())
4729 return WideningDecision == CM_Scalarize;
4730 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4731        "Ptr is neither a value or pointer operand");
4732 return WideningDecision != CM_GatherScatter;
4733 };
4734
4735 // A helper that returns true if the given value is a bitcast or
4736 // getelementptr instruction contained in the loop.
4737 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4738 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4739 isa<GetElementPtrInst>(V)) &&
4740 !TheLoop->isLoopInvariant(V);
4741 };
4742
4743 // A helper that evaluates a memory access's use of a pointer. If the use will
4744 // be a scalar use and the pointer is only used by memory accesses, we place
4745 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4746 // PossibleNonScalarPtrs.
4747 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4748 // We only care about bitcast and getelementptr instructions contained in
4749 // the loop.
4750 if (!isLoopVaryingBitCastOrGEP(Ptr))
4751 return;
4752
4753 // If the pointer has already been identified as scalar (e.g., if it was
4754 // also identified as uniform), there's nothing to do.
4755 auto *I = cast<Instruction>(Ptr);
4756 if (Worklist.count(I))
4757 return;
4758
4759 // If the use of the pointer will be a scalar use, and all users of the
4760 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4761 // place the pointer in PossibleNonScalarPtrs.
4762 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4763 return isa<LoadInst>(U) || isa<StoreInst>(U);
4764 }))
4765 ScalarPtrs.insert(I);
4766 else
4767 PossibleNonScalarPtrs.insert(I);
4768 };
4769
4770 // We seed the scalars analysis with two classes of instructions: (1)
4771 // instructions marked uniform-after-vectorization and (2) bitcast,
4772 // getelementptr and (pointer) phi instructions used by memory accesses
4773 // requiring a scalar use.
4774 //
4775 // (1) Add to the worklist all instructions that have been identified as
4776 // uniform-after-vectorization.
4777 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4778
4779 // (2) Add to the worklist all bitcast and getelementptr instructions used by
4780 // memory accesses requiring a scalar use. The pointer operands of loads and
4781 // stores will be scalar as long as the memory access is not a gather or
4782 // scatter operation. The value operand of a store will remain scalar if the
4783 // store is scalarized.
4784 for (auto *BB : TheLoop->blocks())
4785 for (auto &I : *BB) {
4786 if (auto *Load = dyn_cast<LoadInst>(&I)) {
4787 evaluatePtrUse(Load, Load->getPointerOperand());
4788 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4789 evaluatePtrUse(Store, Store->getPointerOperand());
4790 evaluatePtrUse(Store, Store->getValueOperand());
4791 }
4792 }
4793 for (auto *I : ScalarPtrs)
4794 if (!PossibleNonScalarPtrs.count(I)) {
4795 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4796 Worklist.insert(I);
4797 }
4798
4799 // Insert the forced scalars.
4800 // FIXME: Currently widenPHIInstruction() often creates a dead vector
4801 // induction variable when the PHI user is scalarized.
4802 auto ForcedScalar = ForcedScalars.find(VF);
4803 if (ForcedScalar != ForcedScalars.end())
4804 for (auto *I : ForcedScalar->second)
4805 Worklist.insert(I);
4806
4807 // Expand the worklist by looking through any bitcasts and getelementptr
4808 // instructions we've already identified as scalar. This is similar to the
4809 // expansion step in collectLoopUniforms(); however, here we're only
4810 // expanding to include additional bitcasts and getelementptr instructions.
4811 unsigned Idx = 0;
4812 while (Idx != Worklist.size()) {
4813 Instruction *Dst = Worklist[Idx++];
4814 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4815 continue;
4816 auto *Src = cast<Instruction>(Dst->getOperand(0));
4817 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4818 auto *J = cast<Instruction>(U);
4819 return !TheLoop->contains(J) || Worklist.count(J) ||
4820 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4821 isScalarUse(J, Src));
4822 })) {
4823 Worklist.insert(Src);
4824 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4825 }
4826 }
4827
4828 // An induction variable will remain scalar if all users of the induction
4829 // variable and induction variable update remain scalar.
4830 for (auto &Induction : Legal->getInductionVars()) {
4831 auto *Ind = Induction.first;
4832 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4833
4834 // If tail-folding is applied, the primary induction variable will be used
4835 // to feed a vector compare.
4836 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4837 continue;
4838
4839 // Returns true if \p Indvar is a pointer induction that is used directly by
4840 // load/store instruction \p I.
4841 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4842 Instruction *I) {
4843 return Induction.second.getKind() ==
4844 InductionDescriptor::IK_PtrInduction &&
4845 (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4846 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4847 };
4848
4849 // Determine if all users of the induction variable are scalar after
4850 // vectorization.
4851 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4852 auto *I = cast<Instruction>(U);
4853 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4854 IsDirectLoadStoreFromPtrIndvar(Ind, I);
4855 });
4856 if (!ScalarInd)
4857 continue;
4858
4859 // Determine if all users of the induction variable update instruction are
4860 // scalar after vectorization.
4861 auto ScalarIndUpdate =
4862 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4863 auto *I = cast<Instruction>(U);
4864 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4865 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4866 });
4867 if (!ScalarIndUpdate)
4868 continue;
4869
4870 // The induction variable and its update instruction will remain scalar.
4871 Worklist.insert(Ind);
4872 Worklist.insert(IndUpdate);
4873 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4874 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4875                   << "\n");
4876 }
4877
4878 Scalars[VF].insert(Worklist.begin(), Worklist.end());
4879}
4880
4881bool LoopVectorizationCostModel::isScalarWithPredication(
4882 Instruction *I, ElementCount VF) const {
4883 if (!blockNeedsPredicationForAnyReason(I->getParent()))
4884 return false;
4885 switch(I->getOpcode()) {
4886 default:
4887 break;
4888 case Instruction::Load:
4889 case Instruction::Store: {
4890 if (!Legal->isMaskRequired(I))
4891 return false;
4892 auto *Ptr = getLoadStorePointerOperand(I);
4893 auto *Ty = getLoadStoreType(I);
4894 Type *VTy = Ty;
4895 if (VF.isVector())
4896 VTy = VectorType::get(Ty, VF);
4897 const Align Alignment = getLoadStoreAlignment(I);
4898 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4899 TTI.isLegalMaskedGather(VTy, Alignment))
4900 : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4901 TTI.isLegalMaskedScatter(VTy, Alignment));
4902 }
4903 case Instruction::UDiv:
4904 case Instruction::SDiv:
4905 case Instruction::SRem:
4906 case Instruction::URem:
4907 return mayDivideByZero(*I);
4908 }
4909 return false;
4910}
4911
4912bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4913 Instruction *I, ElementCount VF) {
4914 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4915 assert(getWideningDecision(I, VF) == CM_Unknown &&
4916        "Decision should not be set yet.");
4917 auto *Group = getInterleavedAccessGroup(I);
4918 assert(Group && "Must have a group.");
4919
4920 // If the instruction's allocated size doesn't equal its type size, it
4921 // requires padding and will be scalarized.
4922 auto &DL = I->getModule()->getDataLayout();
4923 auto *ScalarTy = getLoadStoreType(I);
4924 if (hasIrregularType(ScalarTy, DL))
4925 return false;
4926
4927 // Check if masking is required.
4928 // A Group may need masking for one of two reasons: it resides in a block that
4929 // needs predication, or it was decided to use masking to deal with gaps
4930 // (either a gap at the end of a load-access that may result in a speculative
4931 // load, or any gaps in a store-access).
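// E.g. (illustrative) a factor-2 load group in which only a[2*i] has a
// member leaves a gap at a[2*i+1]; without a scalar epilogue the last
// vector access may read past the final element and so must be masked.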
4932 bool PredicatedAccessRequiresMasking =
4933 blockNeedsPredicationForAnyReason(I->getParent()) &&
4934 Legal->isMaskRequired(I);
4935 bool LoadAccessWithGapsRequiresEpilogMasking =
4936 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4937 !isScalarEpilogueAllowed();
4938 bool StoreAccessWithGapsRequiresMasking =
4939 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4940 if (!PredicatedAccessRequiresMasking &&
4941 !LoadAccessWithGapsRequiresEpilogMasking &&
4942 !StoreAccessWithGapsRequiresMasking)
4943 return true;
4944
4945 // If masked interleaving is required, we expect that the user/target had
4946 // enabled it, because otherwise it either wouldn't have been created or
4947 // it should have been invalidated by the CostModel.
4948 assert(useMaskedInterleavedAccesses(TTI) &&
4949        "Masked interleave-groups for predicated accesses are not enabled.");
4950
4951 if (Group->isReverse())
4952 return false;
4953
4954 auto *Ty = getLoadStoreType(I);
4955 const Align Alignment = getLoadStoreAlignment(I);
4956 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4957 : TTI.isLegalMaskedStore(Ty, Alignment);
4958}
4959
4960bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4961 Instruction *I, ElementCount VF) {
4962 // Get and ensure we have a valid memory instruction.
4963 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4964
4965 auto *Ptr = getLoadStorePointerOperand(I);
4966 auto *ScalarTy = getLoadStoreType(I);
4967
4968 // In order to be widened, the pointer should be consecutive, first of all.
4969 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4970 return false;
4971
4972 // If the instruction is a store located in a predicated block, it will be
4973 // scalarized.
4974 if (isScalarWithPredication(I, VF))
4975 return false;
4976
4977 // If the instruction's allocated size doesn't equal its type size, it
4978 // requires padding and will be scalarized.
4979 auto &DL = I->getModule()->getDataLayout();
4980 if (hasIrregularType(ScalarTy, DL))
4981 return false;
4982
4983 return true;
4984}
4985
4986void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4987 // We should not collect Uniforms more than once per VF. Right now,
4988 // this function is called from collectUniformsAndScalars(), which
4989 // already does this check. Collecting Uniforms for VF=1 does not make any
4990 // sense.
4991
4992 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4993        "This function should not be visited twice for the same VF");
4994
4995 // Visit the list of Uniforms. Even if no uniform value is found, the entry
4996 // is created so we will not analyze again: Uniforms.count(VF) will return 1.
4997 Uniforms[VF].clear();
4998
4999 // We now know that the loop is vectorizable!
5000 // Collect instructions inside the loop that will remain uniform after
5001 // vectorization.
5002
5003 // Global values, params and instructions outside of the current loop are out
5004 // scope.
5005 auto isOutOfScope = [&](Value *V) -> bool {
5006 Instruction *I = dyn_cast<Instruction>(V);
5007 return (!I || !TheLoop->contains(I));
5008 };
5009
5010 // Worklist containing uniform instructions demanding lane 0.
5011 SetVector<Instruction *> Worklist;
5012 BasicBlock *Latch = TheLoop->getLoopLatch();
5013
5014 // Add uniform instructions demanding lane 0 to the worklist. Instructions
5015 // that are scalar with predication must not be considered uniform after
5016 // vectorization, because that would create an erroneous replicating region
5017 // where only a single instance out of VF should be formed.
5018 // TODO: optimize such seldom cases if found important, see PR40816.
5019 auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5020 if (isOutOfScope(I)) {
5021 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5022                   << *I << "\n");
5023 return;
5024 }
5025 if (isScalarWithPredication(I, VF)) {
5026 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5027                   << *I << "\n");
5028 return;
5029 }
5030 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5031 Worklist.insert(I);
5032 };
5033
5034 // Start with the conditional branch. If the branch condition is an
5035 // instruction contained in the loop that is only used by the branch, it is
5036 // uniform.
5037 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5038 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5039 addToWorklistIfAllowed(Cmp);
5040
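// Illustrative aside (not part of the original source): in a simple loop
// such as
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i];
// the latch compare `i < n` feeds only the back-edge branch, so after
// vectorization only lane 0 of it is ever consumed; it is exactly the kind
// of seed that addToWorklistIfAllowed(Cmp) plants above.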
5041 auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5042 InstWidening WideningDecision = getWideningDecision(I, VF);
5043 assert(WideningDecision != CM_Unknown &&
5044 "Widening decision should be ready at this moment");
5045
5046 // A uniform memory op is itself uniform. We exclude uniform stores
5047 // here as they demand the last lane, not the first one.
5048 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5049 assert(WideningDecision == CM_Scalarize);
5050 return true;
5051 }
5052
5053 return (WideningDecision == CM_Widen ||
5054 WideningDecision == CM_Widen_Reverse ||
5055 WideningDecision == CM_Interleave);
5056 };
5057
5058
5059 // Returns true if Ptr is the pointer operand of a memory access instruction
5060 // I, and I is known to not require scalarization.
5061 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5062 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5063 };
5064
5065 // Holds a list of values which are known to have at least one uniform use.
5066 // Note that there may be other uses which aren't uniform. A "uniform use"
5067 // here is something which only demands lane 0 of the unrolled iterations;
5068 // it does not imply that all lanes produce the same value (e.g. this is not
5069 // the usual meaning of uniform)
5070 SetVector<Value *> HasUniformUse;
5071
5072 // Scan the loop for instructions which are either a) known to have only
5073 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5074 for (auto *BB : TheLoop->blocks())
5075 for (auto &I : *BB) {
5076 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5077 switch (II->getIntrinsicID()) {
5078 case Intrinsic::sideeffect:
5079 case Intrinsic::experimental_noalias_scope_decl:
5080 case Intrinsic::assume:
5081 case Intrinsic::lifetime_start:
5082 case Intrinsic::lifetime_end:
5083 if (TheLoop->hasLoopInvariantOperands(&I))
5084 addToWorklistIfAllowed(&I);
5085 break;
5086 default:
5087 break;
5088 }
5089 }
5090
5091 // ExtractValue instructions must be uniform, because the operands are
5092 // known to be loop-invariant.
5093 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5094 assert(isOutOfScope(EVI->getAggregateOperand()) &&
5095 "Expected aggregate value to be loop invariant");
5096 addToWorklistIfAllowed(EVI);
5097 continue;
5098 }
5099
5100 // If there's no pointer operand, there's nothing to do.
5101 auto *Ptr = getLoadStorePointerOperand(&I);
5102 if (!Ptr)
5103 continue;
5104
5105 // A uniform memory op is itself uniform. We exclude uniform stores
5106 // here as they demand the last lane, not the first one.
5107 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5108 addToWorklistIfAllowed(&I);
5109
5110 if (isUniformDecision(&I, VF)) {
5111 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5112 HasUniformUse.insert(Ptr);
5113 }
5114 }
5115
5116 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5117 // demanding) users. Since loops are assumed to be in LCSSA form, this
5118 // disallows uses outside the loop as well.
5119 for (auto *V : HasUniformUse) {
5120 if (isOutOfScope(V))
5121 continue;
5122 auto *I = cast<Instruction>(V);
5123 auto UsersAreMemAccesses =
5124 llvm::all_of(I->users(), [&](User *U) -> bool {
5125 return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5126 });
5127 if (UsersAreMemAccesses)
5128 addToWorklistIfAllowed(I);
5129 }
5130
5131 // Expand Worklist in topological order: whenever a new instruction
5132 // is added, its users should already be inside Worklist. This ensures
5133 // a uniform instruction will only be used by uniform instructions.
5134 unsigned idx = 0;
5135 while (idx != Worklist.size()) {
5136 Instruction *I = Worklist[idx++];
5137
5138 for (auto OV : I->operand_values()) {
5139 // isOutOfScope operands cannot be uniform instructions.
5140 if (isOutOfScope(OV))
5141 continue;
5142 // First-order recurrence phis should typically be considered
5143 // non-uniform.
5144 auto *OP = dyn_cast<PHINode>(OV);
5145 if (OP && Legal->isFirstOrderRecurrence(OP))
5146 continue;
5147 // If all the users of the operand are uniform, then add the
5148 // operand into the uniform worklist.
5149 auto *OI = cast<Instruction>(OV);
5150 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5151 auto *J = cast<Instruction>(U);
5152 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5153 }))
5154 addToWorklistIfAllowed(OI);
5155 }
5156 }
5157
5158 // For an instruction to be added into Worklist above, all its users inside
5159 // the loop should also be in Worklist. However, this condition cannot be
5160 // true for phi nodes that form a cyclic dependence. We must process phi
5161 // nodes separately. An induction variable will remain uniform if all users
5162 // of the induction variable and induction variable update remain uniform.
5163 // The code below handles both pointer and non-pointer induction variables.
5164 for (auto &Induction : Legal->getInductionVars()) {
5165 auto *Ind = Induction.first;
5166 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5167
5168 // Determine if all users of the induction variable are uniform after
5169 // vectorization.
5170 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5171 auto *I = cast<Instruction>(U);
5172 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5173 isVectorizedMemAccessUse(I, Ind);
5174 });
5175 if (!UniformInd)
5176 continue;
5177
5178 // Determine if all users of the induction variable update instruction are
5179 // uniform after vectorization.
5180 auto UniformIndUpdate =
5181 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5182 auto *I = cast<Instruction>(U);
5183 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5184 isVectorizedMemAccessUse(I, IndUpdate);
5185 });
5186 if (!UniformIndUpdate)
5187 continue;
5188
5189 // The induction variable and its update instruction will remain uniform.
5190 addToWorklistIfAllowed(Ind);
5191 addToWorklistIfAllowed(IndUpdate);
5192 }
5193
5194 Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5195}
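// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): collectLoopUniforms
// above is a fixed-point computation over the use-def graph. Reduced to a toy
// dependency graph, and omitting the memory-access and first-order-recurrence
// special cases, the same worklist pattern looks like this. All names below
// (Node, expandUniform, ...) are hypothetical and exist only for this sketch.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> operands; // values this node consumes
  std::vector<Node *> users;    // values that consume this node
};

// Grow the seed set in topological order: an operand joins only once all of
// its users are already in the set, mirroring the invariant that a uniform
// instruction may only be used by uniform instructions.
std::set<Node *> expandUniform(const std::vector<Node *> &Seeds) {
  std::set<Node *> Uniform(Seeds.begin(), Seeds.end());
  std::vector<Node *> Worklist(Seeds.begin(), Seeds.end());
  for (std::size_t Idx = 0; Idx != Worklist.size(); ++Idx) {
    for (Node *Op : Worklist[Idx]->operands) {
      if (Uniform.count(Op))
        continue;
      bool AllUsersUniform = true;
      for (Node *U : Op->users)
        AllUsersUniform = AllUsersUniform && Uniform.count(U) != 0;
      // Op is revisited each time another of its users enters the worklist,
      // so a rejection here is not final.
      if (AllUsersUniform) {
        Uniform.insert(Op);
        Worklist.push_back(Op);
      }
    }
  }
  return Uniform;
}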
5196
5197bool LoopVectorizationCostModel::runtimeChecksRequired() {
5198 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5199
5200 if (Legal->getRuntimePointerChecking()->Need) {
5201 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5202 "runtime pointer checks needed. Enable vectorization of this "
5203 "loop with '#pragma clang loop vectorize(enable)' when "
5204 "compiling with -Os/-Oz",
5205 "CantVersionLoopWithOptForSize", ORE, TheLoop);
5206 return true;
5207 }
5208
5209 if (!PSE.getUnionPredicate().getPredicates().empty()) {
5210 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5211 "runtime SCEV checks needed. Enable vectorization of this "
5212 "loop with '#pragma clang loop vectorize(enable)' when "
5213 "compiling with -Os/-Oz",
5214 "CantVersionLoopWithOptForSize", ORE, TheLoop);
5215 return true;
5216 }
5217
5218 // FIXME: Avoid specializing for stride==1 instead of bailing out.
5219 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5220 reportVectorizationFailure("Runtime stride check for small trip count",
5221 "runtime stride == 1 checks needed. Enable vectorization of "
5222 "this loop without such check by compiling with -Os/-Oz",
5223 "CantVersionLoopWithOptForSize", ORE, TheLoop);
5224 return true;
5225 }
5226
5227 return false;
5228}
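// Illustrative usage note (not part of the original source): the remark text
// above tells users how to override this size-driven bailout. A loop that
// needs runtime pointer checks can still be vectorized under -Os/-Oz by
// opting in explicitly; saxpy below is a hypothetical example:
//
//   void saxpy(float *a, const float *b, float x, int n) {
//   #pragma clang loop vectorize(enable)
//     for (int i = 0; i < n; ++i)
//       a[i] += x * b[i];
//   }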
5229
5230ElementCount
5231LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5232 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5233 return ElementCount::getScalable(0);
5234
5235 if (Hints->isScalableVectorizationDisabled()) {
5236 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5237 "ScalableVectorizationDisabled", ORE, TheLoop);
5238 return ElementCount::getScalable(0);
5239 }
5240
5241 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5242
5243 auto MaxScalableVF = ElementCount::getScalable(
5244 std::numeric_limits<ElementCount::ScalarTy>::max());
5245
5246 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5247 // FIXME: While for scalable vectors this is currently sufficient, this should
5248 // be replaced by a more detailed mechanism that filters out specific VFs,
5249 // instead of invalidating vectorization for a whole set of VFs based on the
5250 // MaxVF.
5251
5252 // Disable scalable vectorization if the loop contains unsupported reductions.
5253 if (!canVectorizeReductions(MaxScalableVF)) {
5254 reportVectorizationInfo(
5255 "Scalable vectorization not supported for the reduction "
5256 "operations found in this loop.",
5257 "ScalableVFUnfeasible", ORE, TheLoop);
5258 return ElementCount::getScalable(0);
5259 }
5260
5261 // Disable scalable vectorization if the loop contains any instructions
5262 // with element types not supported for scalable vectors.
5263 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5264 return !Ty->isVoidTy() &&
5265 !this->TTI.isElementTypeLegalForScalableVector(Ty);
5266 })) {
5267 reportVectorizationInfo("Scalable vectorization is not supported "
5268 "for all element types found in this loop.",
5269 "ScalableVFUnfeasible", ORE, TheLoop);
5270 return ElementCount::getScalable(0);
5271 }
5272
5273 if (Legal->isSafeForAnyVectorWidth())
5274 return MaxScalableVF;
5275
5276 // Limit MaxScalableVF by the maximum safe dependence distance.
5277 Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5278 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5279 MaxVScale =
5280 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5281 MaxScalableVF = ElementCount::getScalable(
5282 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5283 if (!MaxScalableVF)
5284 reportVectorizationInfo(
5285 "Max legal vector width too small, scalable vectorization "
5286 "unfeasible.",
5287 "ScalableVFUnfeasible", ORE, TheLoop);
5288
5289 return MaxScalableVF;
5290}
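// Worked example (not part of the original source; the numbers are made up):
// if LAA allows MaxSafeElements = 32 and the target (or the function's
// vscale_range attribute) bounds vscale by MaxVScale = 16, the clamp above
// yields
//   MaxScalableVF = scalable(32 / 16) = vscale x 2,
// so even at the largest vscale the vector stays within the safe dependence
// distance. If MaxSafeElements were smaller than MaxVScale, the division
// would give 0 and the "unfeasible" remark above would fire.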
5291
5292FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5293 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5294 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5295 unsigned SmallestType, WidestType;
5296 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5297
5298 // Get the maximum safe dependence distance in bits computed by LAA.
5299 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5300 // the memory access that is most restrictive (involved in the smallest
5301 // dependence distance).
5302 unsigned MaxSafeElements =
5303 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5304
5305 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5306 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5307
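// Worked example (not part of the original source; the numbers are made up):
// if the most restrictive dependence permits 288 safe bits and the widest
// loop type is i32, then
//   MaxSafeElements = PowerOf2Floor(288 / 32) = PowerOf2Floor(9) = 8,
// so MaxSafeFixedVF = 8 going into the target-specific maximization below.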
5308 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5309 << ".\n");
5310 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5311 << ".\n");
5312
5313 // First analyze the UserVF, fall back if the UserVF should be ignored.
5314 if (UserVF) {
5315 auto MaxSafeUserVF =
5316 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5317
5318 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5319 // If `VF=vscale x N` is safe, then so is `VF=N`
5320 if (UserVF.isScalable())
5321 return FixedScalableVFPair(
5322 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5323 else
5324 return UserVF;
5325 }
5326
5327 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5328
5329 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5330 // is better to ignore the hint and let the compiler choose a suitable VF.
5331 if (!UserVF.isScalable()) {
5332 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5333 << " is unsafe, clamping to max safe VF="
5334 << MaxSafeFixedVF << ".\n");
5335 ORE->emit([&]() {
5336 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5337 TheLoop->getStartLoc(),
5338 TheLoop->getHeader())
5339 << "User-specified vectorization factor "
5340 << ore::NV("UserVectorizationFactor", UserVF)
5341 << " is unsafe, clamping to maximum safe vectorization factor "
5342 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5343 });
5344 return MaxSafeFixedVF;
5345 }
5346
5347 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5348 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5349 << " is ignored because scalable vectors are not "
5350 "available.\n");
5351 ORE->emit([&]() {
5352 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5353 TheLoop->getStartLoc(),
5354 TheLoop->getHeader())
5355 << "User-specified vectorization factor "
5356 << ore::NV("UserVectorizationFactor", UserVF)
5357 << " is ignored because the target does not support scalable "
5358 "vectors. The compiler will pick a more suitable value.";
5359 });
5360 } else {
5361 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5362 << " is unsafe. Ignoring scalable UserVF.\n");
5363 ORE->emit([&]() {
5364 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5365 TheLoop->getStartLoc(),
5366 TheLoop->getHeader())
5367 << "User-specified vectorization factor "
5368 << ore::NV("UserVectorizationFactor", UserVF)
5369 << " is unsafe. Ignoring the hint to let the compiler pick a "
5370 "more suitable value.";
5371 });
5372 }
5373 }
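// Recap of the UserVF handling above (restating the source, nothing new):
//   - UserVF within the safe bound     -> honor the hint; a safe scalable
//     hint also implies the equivalent fixed VF, since vscale >= 1.
//   - fixed UserVF above the bound     -> clamp to MaxSafeFixedVF.
//   - scalable UserVF, target lacks
//     scalable vectors                 -> ignore the hint; compiler picks.
//   - scalable UserVF, unsafe          -> ignore the hint; compiler picks.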
5374
5375 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5376 << " / " << WidestType << " bits.\n");
5377
5378 FixedScalableVFPair Result(ElementCount::getFixed(1),
5379 ElementCount::getScalable(0));
5380 if (auto MaxVF =
5381 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5382 MaxSafeFixedVF, FoldTailByMasking))
5383 Result.FixedVF = MaxVF;
5384
5385 if (auto MaxVF =
5386 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5387 MaxSafeScalableVF, FoldTailByMasking))
5388 if (MaxVF.isScalable()) {
5389 Result.ScalableVF = MaxVF;
5390 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5391 << "\n");
5392 }
5393
5394 return Result;
5395}
5396
5397FixedScalableVFPair
5398LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5399 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5400 // TODO: It may be useful to do this since it's still likely to be
5401 // dynamically uniform if the target can skip.
5402 reportVectorizationFailure(
5403 "Not inserting runtime ptr check for divergent target",
5404 "runtime pointer checks needed. Not enabled for divergent target",
5405 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5406 return FixedScalableVFPair::getNone();
5407 }
5408
5409 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5410 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5411 if (TC == 1) {
5412 reportVectorizationFailure("Single iteration (non) loop",
5413 "loop trip count is one, irrelevant for vectorization",
5414 "SingleIterationLoop", ORE, TheLoop);
5415 return FixedScalableVFPair::getNone();
5416 }
5417
5418 switch (ScalarEpilogueStatus) {
5419 case CM_ScalarEpilogueAllowed:
5420 return computeFeasibleMaxVF(TC, UserVF, false);
5421 case CM_ScalarEpilogueNotAllowedUsePredicate:
5422