Bug Summary

File: lib/Transforms/Scalar/LoopStrengthReduce.cpp
Warning: line 3163, column 3
Forming reference to null pointer

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LoopStrengthReduce.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn358860/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-9~svn358860/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-9~svn358860/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn358860/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn358860/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn358860=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-04-22-050718-5320-1 -x c++ /build/llvm-toolchain-snapshot-9~svn358860/lib/Transforms/Scalar/LoopStrengthReduce.cpp -faddrsig
1//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This transformation analyzes and transforms the induction variables (and
10// computations derived from them) into forms suitable for efficient execution
11// on the target.
12//
13// This pass performs a strength reduction on array references inside loops that
14// have as one or more of their components the loop induction variable; it
15// rewrites expressions to take advantage of scaled-index addressing modes
16// available on the target, and it performs a variety of other optimizations
17// related to loop induction variables.
18//
19// Terminology note: this code has a lot of handling for "post-increment" or
20// "post-inc" users. This is not talking about post-increment addressing modes;
21// it is instead talking about code like this:
22//
23// %i = phi [ 0, %entry ], [ %i.next, %latch ]
24// ...
25// %i.next = add %i, 1
26// %c = icmp eq %i.next, %n
27//
28// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
29// it's useful to think about these as the same register, with some uses using
30// the value of the register before the add and some using it after. In this
31// example, the icmp is a post-increment user, since it uses %i.next, which is
32// the value of the induction variable after the increment. The other common
33// case of post-increment users is users outside the loop.
34//
35// TODO: More sophistication in the way Formulae are generated and filtered.
36//
37// TODO: Handle multiple loops at a time.
38//
39// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
40// of a GlobalValue?
41//
42// TODO: When truncation is free, truncate ICmp users' operands to make it a
43// smaller encoding (on x86 at least).
44//
45// TODO: When a negated register is used by an add (such as in a list of
46// multiple base registers, or as the increment expression in an addrec),
47// we may not actually need both reg and (-1 * reg) in registers; the
48// negation can be implemented by using a sub instead of an add. The
49// lack of support for taking this into consideration when making
50// register pressure decisions is partly worked around by the "Special"
51// use kind.
52//
53//===----------------------------------------------------------------------===//
54
55#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
56#include "llvm/ADT/APInt.h"
57#include "llvm/ADT/DenseMap.h"
58#include "llvm/ADT/DenseSet.h"
59#include "llvm/ADT/Hashing.h"
60#include "llvm/ADT/PointerIntPair.h"
61#include "llvm/ADT/STLExtras.h"
62#include "llvm/ADT/SetVector.h"
63#include "llvm/ADT/SmallBitVector.h"
64#include "llvm/ADT/SmallPtrSet.h"
65#include "llvm/ADT/SmallSet.h"
66#include "llvm/ADT/SmallVector.h"
67#include "llvm/ADT/iterator_range.h"
68#include "llvm/Analysis/IVUsers.h"
69#include "llvm/Analysis/LoopAnalysisManager.h"
70#include "llvm/Analysis/LoopInfo.h"
71#include "llvm/Analysis/LoopPass.h"
72#include "llvm/Analysis/ScalarEvolution.h"
73#include "llvm/Analysis/ScalarEvolutionExpander.h"
74#include "llvm/Analysis/ScalarEvolutionExpressions.h"
75#include "llvm/Analysis/ScalarEvolutionNormalization.h"
76#include "llvm/Analysis/TargetTransformInfo.h"
77#include "llvm/Transforms/Utils/Local.h"
78#include "llvm/Config/llvm-config.h"
79#include "llvm/IR/BasicBlock.h"
80#include "llvm/IR/Constant.h"
81#include "llvm/IR/Constants.h"
82#include "llvm/IR/DerivedTypes.h"
83#include "llvm/IR/Dominators.h"
84#include "llvm/IR/GlobalValue.h"
85#include "llvm/IR/IRBuilder.h"
86#include "llvm/IR/InstrTypes.h"
87#include "llvm/IR/Instruction.h"
88#include "llvm/IR/Instructions.h"
89#include "llvm/IR/IntrinsicInst.h"
90#include "llvm/IR/Intrinsics.h"
91#include "llvm/IR/Module.h"
92#include "llvm/IR/OperandTraits.h"
93#include "llvm/IR/Operator.h"
94#include "llvm/IR/PassManager.h"
95#include "llvm/IR/Type.h"
96#include "llvm/IR/Use.h"
97#include "llvm/IR/User.h"
98#include "llvm/IR/Value.h"
99#include "llvm/IR/ValueHandle.h"
100#include "llvm/Pass.h"
101#include "llvm/Support/Casting.h"
102#include "llvm/Support/CommandLine.h"
103#include "llvm/Support/Compiler.h"
104#include "llvm/Support/Debug.h"
105#include "llvm/Support/ErrorHandling.h"
106#include "llvm/Support/MathExtras.h"
107#include "llvm/Support/raw_ostream.h"
108#include "llvm/Transforms/Scalar.h"
109#include "llvm/Transforms/Utils.h"
110#include "llvm/Transforms/Utils/BasicBlockUtils.h"
111#include <algorithm>
112#include <cassert>
113#include <cstddef>
114#include <cstdint>
115#include <cstdlib>
116#include <iterator>
117#include <limits>
118#include <numeric>
119#include <map>
120#include <utility>
121
122using namespace llvm;
123
124#define DEBUG_TYPE "loop-reduce"
125
126/// MaxIVUsers is an arbitrary threshold that provides an early opportunity for
127/// bail out. This threshold is far beyond the number of users that LSR can
128/// conceivably solve, so it should not affect generated code, but catches the
129/// worst cases before LSR burns too much compile time and stack space.
130static const unsigned MaxIVUsers = 200;
131
132// Temporary flag to clean up congruent phis after LSR phi expansion.
133// It's currently disabled until we can determine whether it's truly useful or
134// not. The flag should be removed after the v3.0 release.
135// This is now needed for ivchains.
136static cl::opt<bool> EnablePhiElim(
137 "enable-lsr-phielim", cl::Hidden, cl::init(true),
138 cl::desc("Enable LSR phi elimination"));
139
140// This flag adds the instruction count to the solution cost comparison.
141static cl::opt<bool> InsnsCost(
142 "lsr-insns-cost", cl::Hidden, cl::init(true),
143 cl::desc("Add instruction count to a LSR cost model"));
144
145// Flag to choose how to narrow a complex LSR solution.
146static cl::opt<bool> LSRExpNarrow(
147 "lsr-exp-narrow", cl::Hidden, cl::init(false),
148 cl::desc("Narrow LSR complex solution using"
149 " expectation of registers number"));
150
151// Flag to narrow search space by filtering non-optimal formulae with
152// the same ScaledReg and Scale.
153static cl::opt<bool> FilterSameScaledReg(
154 "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
155 cl::desc("Narrow LSR search space by filtering non-optimal formulae"
156 " with the same ScaledReg and Scale"));
157
158static cl::opt<bool> EnableBackedgeIndexing(
159 "lsr-backedge-indexing", cl::Hidden, cl::init(true),
160 cl::desc("Enable the generation of cross iteration indexed memops"));
161
162static cl::opt<unsigned> ComplexityLimit(
163 "lsr-complexity-limit", cl::Hidden,
164 cl::init(std::numeric_limits<uint16_t>::max()),
165 cl::desc("LSR search space complexity limit"));
166
167static cl::opt<bool> EnableRecursiveSetupCost(
168 "lsr-recursive-setupcost", cl::Hidden, cl::init(true),
169 cl::desc("Enable more thorough lsr setup cost calculation"));
170
171#ifndef NDEBUG
172// Stress test IV chain generation.
173static cl::opt<bool> StressIVChain(
174 "stress-ivchain", cl::Hidden, cl::init(false),
175 cl::desc("Stress test LSR IV chains"));
176#else
177static bool StressIVChain = false;
178#endif
179
180namespace {
181
182struct MemAccessTy {
183 /// Used in situations where the accessed memory type is unknown.
184 static const unsigned UnknownAddressSpace =
185 std::numeric_limits<unsigned>::max();
186
187 Type *MemTy = nullptr;
188 unsigned AddrSpace = UnknownAddressSpace;
189
190 MemAccessTy() = default;
191 MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}
192
193 bool operator==(MemAccessTy Other) const {
194 return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
195 }
196
197 bool operator!=(MemAccessTy Other) const { return !(*this == Other); }
198
199 static MemAccessTy getUnknown(LLVMContext &Ctx,
200 unsigned AS = UnknownAddressSpace) {
201 return MemAccessTy(Type::getVoidTy(Ctx), AS);
202 }
203
204 Type *getType() { return MemTy; }
205};
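// Illustrative usage sketch (not from the analyzed source; Ctx stands for a
// hypothetical LLVMContext). Equality requires both fields to match:
//   MemAccessTy A = MemAccessTy::getUnknown(Ctx);            // void, unknown AS
//   MemAccessTy B = MemAccessTy::getUnknown(Ctx, /*AS=*/0);  // void, AS 0
//   bool Same = (A == B);                                    // false: AS differs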
206
207/// This class holds data which is used to order reuse candidates.
208class RegSortData {
209public:
210 /// This represents the set of LSRUse indices which reference
211 /// a particular register.
212 SmallBitVector UsedByIndices;
213
214 void print(raw_ostream &OS) const;
215 void dump() const;
216};
217
218} // end anonymous namespace
219
220#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
221void RegSortData::print(raw_ostream &OS) const {
222 OS << "[NumUses=" << UsedByIndices.count() << ']';
223}
224
225LLVM_DUMP_METHOD void RegSortData::dump() const {
226 print(errs()); errs() << '\n';
227}
228#endif
229
230namespace {
231
232/// Map register candidates to information about how they are used.
233class RegUseTracker {
234 using RegUsesTy = DenseMap<const SCEV *, RegSortData>;
235
236 RegUsesTy RegUsesMap;
237 SmallVector<const SCEV *, 16> RegSequence;
238
239public:
240 void countRegister(const SCEV *Reg, size_t LUIdx);
241 void dropRegister(const SCEV *Reg, size_t LUIdx);
242 void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);
243
244 bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;
245
246 const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;
247
248 void clear();
249
250 using iterator = SmallVectorImpl<const SCEV *>::iterator;
251 using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator;
252
253 iterator begin() { return RegSequence.begin(); }
254 iterator end() { return RegSequence.end(); }
255 const_iterator begin() const { return RegSequence.begin(); }
256 const_iterator end() const { return RegSequence.end(); }
257};
258
259} // end anonymous namespace
260
261void
262RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
263 std::pair<RegUsesTy::iterator, bool> Pair =
264 RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
265 RegSortData &RSD = Pair.first->second;
266 if (Pair.second)
267 RegSequence.push_back(Reg);
268 RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
269 RSD.UsedByIndices.set(LUIdx);
270}
271
272void
273RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
274 RegUsesTy::iterator It = RegUsesMap.find(Reg);
275 assert(It != RegUsesMap.end());
276 RegSortData &RSD = It->second;
277 assert(RSD.UsedByIndices.size() > LUIdx);
278 RSD.UsedByIndices.reset(LUIdx);
279}
280
281void
282RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
283 assert(LUIdx <= LastLUIdx);
284
285 // Update RegUses. The data structure is not optimized for this purpose;
286 // we must iterate through it and update each of the bit vectors.
287 for (auto &Pair : RegUsesMap) {
288 SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
289 if (LUIdx < UsedByIndices.size())
290 UsedByIndices[LUIdx] =
291 LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false;
292 UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
293 }
294}
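// Worked example (illustrative values): suppose a register's UsedByIndices
// has bits {0, 3} set and swapAndDropUse(1, 3) is called. Bit 1 is assigned
// the old bit 3 (giving {0, 1, 3}), then the resize to min(size, 3) drops
// bit 3, leaving {0, 1}: the use formerly indexed 3 is now indexed 1.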
295
296bool
297RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
298 RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
299 if (I == RegUsesMap.end())
300 return false;
301 const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
302 int i = UsedByIndices.find_first();
303 if (i == -1) return false;
304 if ((size_t)i != LUIdx) return true;
305 return UsedByIndices.find_next(i) != -1;
306}
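// Worked example: with UsedByIndices bits {2} and LUIdx == 2, find_first()
// returns 2 and find_next(2) returns -1, so the register is used only by
// use 2 and the function returns false. With bits {2, 5} it returns true.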
307
308const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
309 RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
310 assert(I != RegUsesMap.end() && "Unknown register!");
311 return I->second.UsedByIndices;
312}
313
314void RegUseTracker::clear() {
315 RegUsesMap.clear();
316 RegSequence.clear();
317}
318
319namespace {
320
321/// This class holds information that describes a formula for computing a
322/// value satisfying a use. It may include broken-out immediates and scaled registers.
323struct Formula {
324 /// Global base address used for complex addressing.
325 GlobalValue *BaseGV = nullptr;
326
327 /// Base offset for complex addressing.
328 int64_t BaseOffset = 0;
329
330 /// Whether any complex addressing has a base register.
331 bool HasBaseReg = false;
332
333 /// The scale of any complex addressing.
334 int64_t Scale = 0;
335
336 /// The list of "base" registers for this use. When this is non-empty, the
337 /// canonical representation of a formula is:
338 /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
339 /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
340 /// 3. The reg containing the recurrent expr related to the current loop in the
341 /// formula should be put in the ScaledReg.
342 /// #1 enforces that the scaled register is always used when at least two
343 /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
344 /// #2 enforces that 1 * reg is reg.
345 /// #3 ensures that regs invariant with respect to the current loop can be
346 /// combined together in LSR codegen.
347 /// This invariant can be temporarily broken while building a formula.
348 /// However, every formula inserted into the LSRInstance must be in canonical
349 /// form.
350 SmallVector<const SCEV *, 4> BaseRegs;
351
352 /// The 'scaled' register for this use. This should be non-null when Scale is
353 /// not zero.
354 const SCEV *ScaledReg = nullptr;
355
356 /// An additional constant offset which is added near the use. This requires a
357 /// temporary register, but the offset itself can live in an add immediate
358 /// field rather than a register.
359 int64_t UnfoldedOffset = 0;
360
361 Formula() = default;
362
363 void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
364
365 bool isCanonical(const Loop &L) const;
366
367 void canonicalize(const Loop &L);
368
369 bool unscale();
370
371 bool hasZeroEnd() const;
372
373 size_t getNumRegs() const;
374 Type *getType() const;
375
376 void deleteBaseReg(const SCEV *&S);
377
378 bool referencesReg(const SCEV *S) const;
379 bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
380 const RegUseTracker &RegUses) const;
381
382 void print(raw_ostream &OS) const;
383 void dump() const;
384};
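// Illustrative canonical encodings of the invariants above:
//   reg1 + reg2    ->  BaseRegs = {reg1}, ScaledReg = reg2, Scale = 1  (#1)
//   1*reg          ->  BaseRegs = {reg},  ScaledReg = null, Scale = 0  (#2)
//   reg1 + 2*reg2  ->  BaseRegs = {reg1}, ScaledReg = reg2, Scale = 2
// If reg2 is the addrec {0,+,4}<%L> for the current loop L, invariant #3
// requires it to occupy ScaledReg rather than a BaseRegs slot.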
385
386} // end anonymous namespace
387
388/// Recursion helper for initialMatch.
389static void DoInitialMatch(const SCEV *S, Loop *L,
390 SmallVectorImpl<const SCEV *> &Good,
391 SmallVectorImpl<const SCEV *> &Bad,
392 ScalarEvolution &SE) {
393 // Collect expressions which properly dominate the loop header.
394 if (SE.properlyDominates(S, L->getHeader())) {
395 Good.push_back(S);
396 return;
397 }
398
399 // Look at add operands.
400 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
401 for (const SCEV *S : Add->operands())
402 DoInitialMatch(S, L, Good, Bad, SE);
403 return;
404 }
405
406 // Look at addrec operands.
407 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
408 if (!AR->getStart()->isZero() && AR->isAffine()) {
409 DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
410 DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
411 AR->getStepRecurrence(SE),
412 // FIXME: AR->getNoWrapFlags()
413 AR->getLoop(), SCEV::FlagAnyWrap),
414 L, Good, Bad, SE);
415 return;
416 }
417
418 // Handle a multiplication by -1 (negation) if it didn't fold.
419 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
420 if (Mul->getOperand(0)->isAllOnesValue()) {
421 SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
422 const SCEV *NewMul = SE.getMulExpr(Ops);
423
424 SmallVector<const SCEV *, 4> MyGood;
425 SmallVector<const SCEV *, 4> MyBad;
426 DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
427 const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
428 SE.getEffectiveSCEVType(NewMul->getType())));
429 for (const SCEV *S : MyGood)
430 Good.push_back(SE.getMulExpr(NegOne, S));
431 for (const SCEV *S : MyBad)
432 Bad.push_back(SE.getMulExpr(NegOne, S));
433 return;
434 }
435
436 // Ok, we can't do anything interesting. Just stuff the whole thing into a
437 // register and hope for the best.
438 Bad.push_back(S);
439}
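// Illustrative split: for S = {%inv,+,1}<%L> with %inv defined outside the
// loop, the start %inv properly dominates the header and is pushed to Good,
// while the remaining {0,+,1}<%L> is loop-variant and lands in Bad;
// initialMatch below then folds each sum into its own base register.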
440
441/// Incorporate loop-variant parts of S into this Formula, attempting to keep
442/// all loop-invariant and loop-computable values in a single base register.
443void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
444 SmallVector<const SCEV *, 4> Good;
445 SmallVector<const SCEV *, 4> Bad;
446 DoInitialMatch(S, L, Good, Bad, SE);
447 if (!Good.empty()) {
448 const SCEV *Sum = SE.getAddExpr(Good);
449 if (!Sum->isZero())
450 BaseRegs.push_back(Sum);
451 HasBaseReg = true;
452 }
453 if (!Bad.empty()) {
454 const SCEV *Sum = SE.getAddExpr(Bad);
455 if (!Sum->isZero())
456 BaseRegs.push_back(Sum);
457 HasBaseReg = true;
458 }
459 canonicalize(*L);
460}
461
462/// Check whether or not this formula satisfies the canonical
463/// representation.
464/// \see Formula::BaseRegs.
465bool Formula::isCanonical(const Loop &L) const {
466 if (!ScaledReg)
467 return BaseRegs.size() <= 1;
468
469 if (Scale != 1)
470 return true;
471
472 if (Scale == 1 && BaseRegs.empty())
473 return false;
474
475 const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
476 if (SAR && SAR->getLoop() == &L)
477 return true;
478
479 // If ScaledReg is not a recurrent expr, or it is but its loop is not the
480 // current loop, while BaseRegs contains a recurrent expr reg related to the
481 // current loop, we want to swap the reg in BaseRegs with ScaledReg.
482 auto I =
483 find_if(make_range(BaseRegs.begin(), BaseRegs.end()), [&](const SCEV *S) {
484 return isa<const SCEVAddRecExpr>(S) &&
485 (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
486 });
487 return I == BaseRegs.end();
488}
489
490/// Helper method to morph a formula into its canonical representation.
491/// \see Formula::BaseRegs.
492/// Every formula having more than one base register must use the ScaledReg
493/// field. Otherwise, we would have to do special cases everywhere in LSR
494/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
495/// On the other hand, 1*reg should be canonicalized into reg.
496void Formula::canonicalize(const Loop &L) {
497 if (isCanonical(L))
498 return;
499 // So far we did not need this case. This is easy to implement but it is
500 // useless to maintain dead code. Besides, it could hurt compile time.
501 assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");
502
503 // Keep the invariant sum in BaseRegs and one of the variant sums in ScaledReg.
504 if (!ScaledReg) {
505 ScaledReg = BaseRegs.back();
506 BaseRegs.pop_back();
507 Scale = 1;
508 }
509
510 // If ScaledReg is an invariant with respect to L, find the reg from
511 // BaseRegs containing the recurrent expr related with Loop L. Swap the
512 // reg with ScaledReg.
513 const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
514 if (!SAR || SAR->getLoop() != &L) {
515 auto I = find_if(make_range(BaseRegs.begin(), BaseRegs.end()),
516 [&](const SCEV *S) {
517 return isa<const SCEVAddRecExpr>(S) &&
518 (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
519 });
520 if (I != BaseRegs.end())
521 std::swap(ScaledReg, *I);
522 }
523}
524
525/// Get rid of the scale in the formula.
526/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
527/// \return true if it was possible to get rid of the scale, false otherwise.
528/// \note After this operation the formula may not be in the canonical form.
529bool Formula::unscale() {
530 if (Scale != 1)
531 return false;
532 Scale = 0;
533 BaseRegs.push_back(ScaledReg);
534 ScaledReg = nullptr;
535 return true;
536}
537
538bool Formula::hasZeroEnd() const {
539 if (UnfoldedOffset || BaseOffset)
540 return false;
541 if (BaseRegs.size() != 1 || ScaledReg)
542 return false;
543 return true;
544}
545
546/// Return the total number of register operands used by this formula. This does
547/// not include register uses implied by non-constant addrec strides.
548size_t Formula::getNumRegs() const {
549 return !!ScaledReg + BaseRegs.size();
550}
551
552/// Return the type of this formula, if it has one, or null otherwise. This type
553/// is meaningless except for the bit size.
554Type *Formula::getType() const {
555 return !BaseRegs.empty() ? BaseRegs.front()->getType() :
556 ScaledReg ? ScaledReg->getType() :
557 BaseGV ? BaseGV->getType() :
558 nullptr;
559}
560
561/// Delete the given base reg from the BaseRegs list.
562void Formula::deleteBaseReg(const SCEV *&S) {
563 if (&S != &BaseRegs.back())
564 std::swap(S, BaseRegs.back());
565 BaseRegs.pop_back();
566}
567
568/// Test if this formula references the given register.
569bool Formula::referencesReg(const SCEV *S) const {
570 return S == ScaledReg || is_contained(BaseRegs, S);
571}
572
573/// Test whether this formula uses registers which are used by uses other than
574/// the use with the given index.
575bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
576 const RegUseTracker &RegUses) const {
577 if (ScaledReg)
578 if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
579 return true;
580 for (const SCEV *BaseReg : BaseRegs)
581 if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
582 return true;
583 return false;
584}
585
586#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
587void Formula::print(raw_ostream &OS) const {
588 bool First = true;
589 if (BaseGV) {
590 if (!First) OS << " + "; else First = false;
591 BaseGV->printAsOperand(OS, /*PrintType=*/false);
592 }
593 if (BaseOffset != 0) {
594 if (!First) OS << " + "; else First = false;
595 OS << BaseOffset;
596 }
597 for (const SCEV *BaseReg : BaseRegs) {
598 if (!First) OS << " + "; else First = false;
599 OS << "reg(" << *BaseReg << ')';
600 }
601 if (HasBaseReg && BaseRegs.empty()) {
602 if (!First) OS << " + "; else First = false;
603 OS << "**error: HasBaseReg**";
604 } else if (!HasBaseReg && !BaseRegs.empty()) {
605 if (!First) OS << " + "; else First = false;
606 OS << "**error: !HasBaseReg**";
607 }
608 if (Scale != 0) {
609 if (!First) OS << " + "; else First = false;
610 OS << Scale << "*reg(";
611 if (ScaledReg)
612 OS << *ScaledReg;
613 else
614 OS << "<unknown>";
615 OS << ')';
616 }
617 if (UnfoldedOffset != 0) {
618 if (!First) OS << " + ";
619 OS << "imm(" << UnfoldedOffset << ')';
620 }
621}
622
623LLVM_DUMP_METHOD void Formula::dump() const {
624 print(errs()); errs() << '\n';
625}
626#endif
627
628/// Return true if the given addrec can be sign-extended without changing its
629/// value.
630static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
631 Type *WideTy =
632 IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
633 return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
634}
635
636/// Return true if the given add can be sign-extended without changing its
637/// value.
638static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
639 Type *WideTy =
640 IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
641 return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
642}
643
644/// Return true if the given mul can be sign-extended without changing its
645/// value.
646static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
647 Type *WideTy =
648 IntegerType::get(SE.getContext(),
649 SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
650 return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
651}
652
653/// Return an expression for LHS /s RHS, if it can be determined and if the
654/// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
655/// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that
656/// the multiplication may overflow, which is useful when the result will be
657/// used in a context where the most significant bits are ignored.
658static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
659 ScalarEvolution &SE,
660 bool IgnoreSignificantBits = false) {
661 // Handle the trivial case, which works for any SCEV type.
662 if (LHS == RHS)
663 return SE.getConstant(LHS->getType(), 1);
664
665 // Handle a few RHS special cases.
666 const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
667 if (RC) {
668 const APInt &RA = RC->getAPInt();
669 // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
670 // some folding.
671 if (RA.isAllOnesValue())
672 return SE.getMulExpr(LHS, RC);
673 // Handle x /s 1 as x.
674 if (RA == 1)
675 return LHS;
676 }
677
678 // Check for a division of a constant by a constant.
679 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
680 if (!RC)
681 return nullptr;
682 const APInt &LA = C->getAPInt();
683 const APInt &RA = RC->getAPInt();
684 if (LA.srem(RA) != 0)
685 return nullptr;
686 return SE.getConstant(LA.sdiv(RA));
687 }
688
689 // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
690 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
691 if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
692 const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
693 IgnoreSignificantBits);
694 if (!Step) return nullptr;
695 const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
696 IgnoreSignificantBits);
697 if (!Start) return nullptr;
698 // FlagNW is independent of the start value, step direction, and is
699 // preserved with smaller magnitude steps.
700 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
701 return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
702 }
703 return nullptr;
704 }
705
706 // Distribute the sdiv over add operands, if the add doesn't overflow.
707 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
708 if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
709 SmallVector<const SCEV *, 8> Ops;
710 for (const SCEV *S : Add->operands()) {
711 const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
712 if (!Op) return nullptr;
713 Ops.push_back(Op);
714 }
715 return SE.getAddExpr(Ops);
716 }
717 return nullptr;
718 }
719
720 // Check for a multiply operand that we can pull RHS out of.
721 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
722 if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
723 SmallVector<const SCEV *, 4> Ops;
724 bool Found = false;
725 for (const SCEV *S : Mul->operands()) {
726 if (!Found)
727 if (const SCEV *Q = getExactSDiv(S, RHS, SE,
728 IgnoreSignificantBits)) {
729 S = Q;
730 Found = true;
731 }
732 Ops.push_back(S);
733 }
734 return Found ? SE.getMulExpr(Ops) : nullptr;
735 }
736 return nullptr;
737 }
738
739 // Otherwise we don't know.
740 return nullptr;
741}
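// Worked examples: getExactSDiv({8,+,4}<%L>, 4) divides start and step
// separately (when the addrec is sign-extendable or significant bits are
// ignored), yielding {2,+,1}<%L>; getExactSDiv((3 + %a), 2) returns null
// because the constant operand 3 leaves a nonzero remainder.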
742
743/// If S involves the addition of a constant integer value, return that integer
744/// value, and mutate S to point to a new SCEV with that value excluded.
745static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
746 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
747 if (C->getAPInt().getMinSignedBits() <= 64) {
748 S = SE.getConstant(C->getType(), 0);
749 return C->getValue()->getSExtValue();
750 }
751 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
752 SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
753 int64_t Result = ExtractImmediate(NewOps.front(), SE);
754 if (Result != 0)
755 S = SE.getAddExpr(NewOps);
756 return Result;
757 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
758 SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
759 int64_t Result = ExtractImmediate(NewOps.front(), SE);
760 if (Result != 0)
761 S = SE.getAddRecExpr(NewOps, AR->getLoop(),
762 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
763 SCEV::FlagAnyWrap);
764 return Result;
765 }
766 return 0;
767}
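// Worked example: for S = (5 + %a), ExtractImmediate returns 5 and mutates
// S to %a (getAddExpr folds away the zeroed constant); for S = {7,+,1}<%L>
// it returns 7 and rewrites S to {0,+,1}<%L>.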
768
769/// If S involves the addition of a GlobalValue address, return that symbol, and
770/// mutate S to point to a new SCEV with that value excluded.
771static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
772 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
773 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
774 S = SE.getConstant(GV->getType(), 0);
775 return GV;
776 }
777 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
778 SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
779 GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
780 if (Result)
781 S = SE.getAddExpr(NewOps);
782 return Result;
783 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
784 SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
785 GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
786 if (Result)
787 S = SE.getAddRecExpr(NewOps, AR->getLoop(),
788 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
789 SCEV::FlagAnyWrap);
790 return Result;
791 }
792 return nullptr;
793}
794
795/// Returns true if the specified instruction is using the specified value as an
796/// address.
797static bool isAddressUse(const TargetTransformInfo &TTI,
798 Instruction *Inst, Value *OperandVal) {
799 bool isAddress = isa<LoadInst>(Inst);
800 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
801 if (SI->getPointerOperand() == OperandVal)
802 isAddress = true;
803 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
804 // Addressing modes can also be folded into prefetches and a variety
805 // of intrinsics.
806 switch (II->getIntrinsicID()) {
807 case Intrinsic::memset:
808 case Intrinsic::prefetch:
809 if (II->getArgOperand(0) == OperandVal)
810 isAddress = true;
811 break;
812 case Intrinsic::memmove:
813 case Intrinsic::memcpy:
814 if (II->getArgOperand(0) == OperandVal ||
815 II->getArgOperand(1) == OperandVal)
816 isAddress = true;
817 break;
818 default: {
819 MemIntrinsicInfo IntrInfo;
820 if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
821 if (IntrInfo.PtrVal == OperandVal)
822 isAddress = true;
823 }
824 }
825 }
826 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
827 if (RMW->getPointerOperand() == OperandVal)
828 isAddress = true;
829 } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
830 if (CmpX->getPointerOperand() == OperandVal)
831 isAddress = true;
832 }
833 return isAddress;
834}
835
836/// Return the type of the memory being accessed.
837static MemAccessTy getAccessType(const TargetTransformInfo &TTI,
838 Instruction *Inst, Value *OperandVal) {
839 MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace);
840 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
841 AccessTy.MemTy = SI->getOperand(0)->getType();
842 AccessTy.AddrSpace = SI->getPointerAddressSpace();
843 } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
844 AccessTy.AddrSpace = LI->getPointerAddressSpace();
845 } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
846 AccessTy.AddrSpace = RMW->getPointerAddressSpace();
847 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
848 AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
849 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
850 switch (II->getIntrinsicID()) {
851 case Intrinsic::prefetch:
852 case Intrinsic::memset:
853 AccessTy.AddrSpace = II->getArgOperand(0)->getType()->getPointerAddressSpace();
854 AccessTy.MemTy = OperandVal->getType();
855 break;
856 case Intrinsic::memmove:
857 case Intrinsic::memcpy:
858 AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace();
859 AccessTy.MemTy = OperandVal->getType();
860 break;
861 default: {
862 MemIntrinsicInfo IntrInfo;
863 if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
864 AccessTy.AddrSpace
865 = IntrInfo.PtrVal->getType()->getPointerAddressSpace();
866 }
867
868 break;
869 }
870 }
871 }
872
873 // All pointers have the same requirements, so canonicalize them to an
874 // arbitrary pointer type to minimize variation.
875 if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy))
876 AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
877 PTy->getAddressSpace());
878
879 return AccessTy;
880}
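// Example of the canonicalization above: a store of an i32* value yields
// MemTy = i32*, which is rewritten to i1* in the same address space, since
// only "is a pointer" and the address space matter for addressing modes.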
881
882/// Return true if this AddRec is already a phi in its loop.
883static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
884 for (PHINode &PN : AR->getLoop()->getHeader()->phis()) {
885 if (SE.isSCEVable(PN.getType()) &&
886 (SE.getEffectiveSCEVType(PN.getType()) ==
887 SE.getEffectiveSCEVType(AR->getType())) &&
888 SE.getSCEV(&PN) == AR)
889 return true;
890 }
891 return false;
892}
893
894/// Check if expanding this expression is likely to incur significant cost. This
895/// is tricky because SCEV doesn't track which expressions are actually computed
896/// by the current IR.
897///
898/// We currently allow expansion of IV increments that involve adds,
899/// multiplication by constants, and AddRecs from existing phis.
900///
901/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
902/// obvious multiple of the UDivExpr.
903static bool isHighCostExpansion(const SCEV *S,
904 SmallPtrSetImpl<const SCEV*> &Processed,
905 ScalarEvolution &SE) {
906 // Zero/One operand expressions
907 switch (S->getSCEVType()) {
908 case scUnknown:
909 case scConstant:
910 return false;
911 case scTruncate:
912 return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
913 Processed, SE);
914 case scZeroExtend:
915 return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
916 Processed, SE);
917 case scSignExtend:
918 return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
919 Processed, SE);
920 }
921
922 if (!Processed.insert(S).second)
923 return false;
924
925 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
926 for (const SCEV *S : Add->operands()) {
927 if (isHighCostExpansion(S, Processed, SE))
928 return true;
929 }
930 return false;
931 }
932
933 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
934 if (Mul->getNumOperands() == 2) {
935 // Multiplication by a constant is ok
936 if (isa<SCEVConstant>(Mul->getOperand(0)))
937 return isHighCostExpansion(Mul->getOperand(1), Processed, SE);
938
939 // If we have the value of one operand, check if an existing
940 // multiplication already generates this expression.
941 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
942 Value *UVal = U->getValue();
943 for (User *UR : UVal->users()) {
944 // If U is a constant, it may be used by a ConstantExpr.
945 Instruction *UI = dyn_cast<Instruction>(UR);
946 if (UI && UI->getOpcode() == Instruction::Mul &&
947 SE.isSCEVable(UI->getType())) {
948 return SE.getSCEV(UI) == Mul;
949 }
950 }
951 }
952 }
953 }
954
955 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
956 if (isExistingPhi(AR, SE))
957 return false;
958 }
959
960 // For now, consider any other type of expression (div/mul/min/max) high cost.
961 return true;
962}
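// Examples: %a + %b recurses into two scUnknown operands and is cheap,
// while an AddRec that does not already exist as a loop phi falls through
// to the final return and is considered high cost.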
963
964/// If any of the instructions in the specified set are trivially dead, delete
965/// them and see if this makes any of their operands subsequently dead.
966static bool
967DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
968 bool Changed = false;
969
970 while (!DeadInsts.empty()) {
971 Value *V = DeadInsts.pop_back_val();
972 Instruction *I = dyn_cast_or_null<Instruction>(V);
973
974 if (!I || !isInstructionTriviallyDead(I))
975 continue;
976
977 for (Use &O : I->operands())
978 if (Instruction *U = dyn_cast<Instruction>(O)) {
979 O = nullptr;
980 if (U->use_empty())
981 DeadInsts.emplace_back(U);
982 }
983
984 I->eraseFromParent();
985 Changed = true;
986 }
987
988 return Changed;
989}
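// Trace of the worklist: given %a = add %x, 1 (whose only user is %b) and a
// dead %b = mul %a, 2, popping %b nulls its operand uses, making %a
// use-empty; %a is then queued and erased on a later iteration, so entire
// dead chains are removed in one call.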
990
991namespace {
992
993class LSRUse;
994
995} // end anonymous namespace
996
997/// Check if the addressing mode defined by \p F is completely
998/// folded in \p LU at isel time.
999/// This includes address-mode folding and special icmp tricks.
1000/// This function returns true if \p LU can accommodate what \p F
1001/// defines and up to 1 base + 1 scaled + offset.
1002/// In other words, if \p F has several base registers, this function may
1003/// still return true. Therefore, users still need to account for
1004/// additional base registers and/or unfolded offsets to derive an
1005/// accurate cost model.
1006static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1007 const LSRUse &LU, const Formula &F);
1008
1009// Get the cost of the scaling factor used in F for LU.
1010static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
1011 const LSRUse &LU, const Formula &F,
1012 const Loop &L);
1013
1014namespace {
1015
1016/// This class is used to measure and compare candidate formulae.
1017class Cost {
1018 const Loop *L = nullptr;
1019 ScalarEvolution *SE = nullptr;
1020 const TargetTransformInfo *TTI = nullptr;
1021 TargetTransformInfo::LSRCost C;
1022
1023public:
1024 Cost() = delete;
1025 Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI) :
1026 L(L), SE(&SE), TTI(&TTI) {
1027 C.Insns = 0;
1028 C.NumRegs = 0;
1029 C.AddRecCost = 0;
1030 C.NumIVMuls = 0;
1031 C.NumBaseAdds = 0;
1032 C.ImmCost = 0;
1033 C.SetupCost = 0;
1034 C.ScaleCost = 0;
1035 }
1036
1037 bool isLess(Cost &Other);
1038
1039 void Lose();
1040
1041#ifndef NDEBUG
1042 // Once any of the metrics loses, they must all remain losers.
1043 bool isValid() {
1044 return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
1045 | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
1046 || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
1047 & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);
1048 }
1049#endif
1050
1051 bool isLoser() {
1052 assert(isValid() && "invalid cost");
1053 return C.NumRegs == ~0u;
1054 }
1055
1056 void RateFormula(const Formula &F,
1057 SmallPtrSetImpl<const SCEV *> &Regs,
1058 const DenseSet<const SCEV *> &VisitedRegs,
1059 const LSRUse &LU,
1060 SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);
1061
1062 void print(raw_ostream &OS) const;
1063 void dump() const;
1064
1065private:
1066 void RateRegister(const Formula &F, const SCEV *Reg,
1067 SmallPtrSetImpl<const SCEV *> &Regs);
1068 void RatePrimaryRegister(const Formula &F, const SCEV *Reg,
1069 SmallPtrSetImpl<const SCEV *> &Regs,
1070 SmallPtrSetImpl<const SCEV *> *LoserRegs);
1071};
1072
1073/// An operand value in an instruction which is to be replaced with some
1074/// equivalent, possibly strength-reduced, replacement.
1075struct LSRFixup {
1076 /// The instruction which will be updated.
1077 Instruction *UserInst = nullptr;
1078
1079 /// The operand of the instruction which will be replaced. The operand may be
1080 /// used more than once; every instance will be replaced.
1081 Value *OperandValToReplace = nullptr;
1082
1083 /// If this user is to use the post-incremented value of an induction
1084 /// variable, this set is non-empty and holds the loops associated with the
1085 /// induction variable.
1086 PostIncLoopSet PostIncLoops;
1087
1088 /// A constant offset to be added to the LSRUse expression. This allows
1089 /// multiple fixups to share the same LSRUse with different offsets, for
1090 /// example in an unrolled loop.
1091 int64_t Offset = 0;
1092
1093 LSRFixup() = default;
1094
1095 bool isUseFullyOutsideLoop(const Loop *L) const;
1096
1097 void print(raw_ostream &OS) const;
1098 void dump() const;
1099};
1100
1101/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted
1102/// SmallVectors of const SCEV*.
1103struct UniquifierDenseMapInfo {
1104 static SmallVector<const SCEV *, 4> getEmptyKey() {
1105 SmallVector<const SCEV *, 4> V;
1106 V.push_back(reinterpret_cast<const SCEV *>(-1));
1107 return V;
1108 }
1109
1110 static SmallVector<const SCEV *, 4> getTombstoneKey() {
1111 SmallVector<const SCEV *, 4> V;
1112 V.push_back(reinterpret_cast<const SCEV *>(-2));
1113 return V;
1114 }
1115
1116 static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
1117 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
1118 }
1119
1120 static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
1121 const SmallVector<const SCEV *, 4> &RHS) {
1122 return LHS == RHS;
1123 }
1124};
1125
1126/// This class holds the state that LSR keeps for each use in IVUsers, as well
1127/// as uses invented by LSR itself. It includes information about what kinds of
1128/// things can be folded into the user, information about the user itself, and
1129/// information about how the use may be satisfied. TODO: Represent multiple
1130/// users of the same expression in common?
1131class LSRUse {
1132 DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;
1133
1134public:
1135 /// An enum for a kind of use, indicating what types of scaled and immediate
1136 /// operands it might support.
1137 enum KindType {
1138 Basic, ///< A normal use, with no folding.
1139 Special, ///< A special case of basic, allowing -1 scales.
1140 Address, ///< An address use; folding according to TargetLowering
1141 ICmpZero ///< An equality icmp with both operands folded into one.
1142 // TODO: Add a generic icmp too?
1143 };
1144
1145 using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;
1146
1147 KindType Kind;
1148 MemAccessTy AccessTy;
1149
1150 /// The list of operands which are to be replaced.
1151 SmallVector<LSRFixup, 8> Fixups;
1152
1153 /// Keep track of the min and max offsets of the fixups.
1154 int64_t MinOffset = std::numeric_limits<int64_t>::max();
1155 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
1156
1157 /// This records whether all of the fixups using this LSRUse are outside of
1158 /// the loop, in which case some special-case heuristics may be used.
1159 bool AllFixupsOutsideLoop = true;
1160
1161 /// RigidFormula is set to true to guarantee that this use will be associated
1162 /// with a single formula--the one that initially matched. Some SCEV
1163 /// expressions cannot be expanded. This allows LSR to consider the registers
1164 /// used by those expressions without the need to expand them later after
1165 /// changing the formula.
1166 bool RigidFormula = false;
1167
1168 /// This records the widest use type for any fixup using this
1169 /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max
1170 /// fixup widths to be equivalent, because the narrower one may be relying on
1171 /// the implicit truncation to truncate away bogus bits.
1172 Type *WidestFixupType = nullptr;
1173
1174 /// A list of ways to build a value that can satisfy this user. After the
1175 /// list is populated, one of these is selected heuristically and used to
1176 /// formulate a replacement for OperandValToReplace in UserInst.
1177 SmallVector<Formula, 12> Formulae;
1178
1179 /// The set of register candidates used by all formulae in this LSRUse.
1180 SmallPtrSet<const SCEV *, 4> Regs;
1181
1182 LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}
1183
1184 LSRFixup &getNewFixup() {
1185 Fixups.push_back(LSRFixup());
1186 return Fixups.back();
1187 }
1188
1189 void pushFixup(LSRFixup &f) {
1190 Fixups.push_back(f);
1191 if (f.Offset > MaxOffset)
1192 MaxOffset = f.Offset;
1193 if (f.Offset < MinOffset)
1194 MinOffset = f.Offset;
1195 }
1196
1197 bool HasFormulaWithSameRegs(const Formula &F) const;
1198 float getNotSelectedProbability(const SCEV *Reg) const;
1199 bool InsertFormula(const Formula &F, const Loop &L);
1200 void DeleteFormula(Formula &F);
1201 void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);
1202
1203 void print(raw_ostream &OS) const;
1204 void dump() const;
1205};
1206
1207} // end anonymous namespace
1208
1209static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1210 LSRUse::KindType Kind, MemAccessTy AccessTy,
1211 GlobalValue *BaseGV, int64_t BaseOffset,
1212 bool HasBaseReg, int64_t Scale,
1213 Instruction *Fixup = nullptr);
1214
1215static unsigned getSetupCost(const SCEV *Reg) {
1216 if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg))
1217 return 1;
1218 if (!EnableRecursiveSetupCost)
1219 return 0;
1220 if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg))
1221 return getSetupCost(S->getStart());
1222 if (auto S = dyn_cast<SCEVCastExpr>(Reg))
1223 return getSetupCost(S->getOperand());
1224 if (auto S = dyn_cast<SCEVNAryExpr>(Reg))
1225 return std::accumulate(S->op_begin(), S->op_end(), 0,
1226 [](unsigned i, const SCEV *Reg) {
1227 return i + getSetupCost(Reg);
1228 });
1229 if (auto S = dyn_cast<SCEVUDivExpr>(Reg))
1230 return getSetupCost(S->getLHS()) + getSetupCost(S->getRHS());
1231 return 0;
1232}
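// Worked example (with recursive setup cost enabled, the default): for
// Reg = ((4 * %n) + %m), the SCEVNAryExpr case sums operand costs
// recursively: the inner mul costs 1 (constant 4) + 1 (%n), and %m adds 1,
// for a total setup cost of 3.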
1233
1234/// Tally up interesting quantities from the given register.
1235void Cost::RateRegister(const Formula &F, const SCEV *Reg,
1236 SmallPtrSetImpl<const SCEV *> &Regs) {
1237 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
1238 // If this is an addrec for another loop, it should be an invariant
1239 // with respect to L since L is the innermost loop (at least
1240 // for now LSR only handles innermost loops).
1241 if (AR->getLoop() != L) {
1242 // If the AddRec exists, consider its register free and leave it alone.
1243 if (isExistingPhi(AR, *SE))
1244 return;
1245
1246 // It is bad to allow LSR for current loop to add induction variables
1247 // for its sibling loops.
1248 if (!AR->getLoop()->contains(L)) {
1249 Lose();
1250 return;
1251 }
1252
1253 // Otherwise, it will be an invariant with respect to Loop L.
1254 ++C.NumRegs;
1255 return;
1256 }
1257
1258 unsigned LoopCost = 1;
1259 if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
1260 TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {
1261
1262 // If the step size matches the base offset, we could use pre-indexed
1263 // addressing.
1264 if (TTI->shouldFavorBackedgeIndex(L)) {
1265 if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
1266 if (Step->getAPInt() == F.BaseOffset)
1267 LoopCost = 0;
1268 }
1269
1270 if (TTI->shouldFavorPostInc()) {
1271 const SCEV *LoopStep = AR->getStepRecurrence(*SE);
1272 if (isa<SCEVConstant>(LoopStep)) {
1273 const SCEV *LoopStart = AR->getStart();
1274 if (!isa<SCEVConstant>(LoopStart) &&
1275 SE->isLoopInvariant(LoopStart, L))
1276 LoopCost = 0;
1277 }
1278 }
1279 }
1280 C.AddRecCost += LoopCost;
1281
1282 // Add the step value register, if it needs one.
1283 // TODO: The non-affine case isn't precisely modeled here.
1284 if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
1285 if (!Regs.count(AR->getOperand(1))) {
1286 RateRegister(F, AR->getOperand(1), Regs);
1287 if (isLoser())
1288 return;
1289 }
1290 }
1291 }
1292 ++C.NumRegs;
1293
1294 // Rough heuristic; favor registers which don't require extra setup
1295 // instructions in the preheader.
1296 C.SetupCost += getSetupCost(Reg);
1297
1298 C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
1299 SE->hasComputableLoopEvolution(Reg, L);
1300}
1301
1302/// Record this register in the set. If we haven't seen it before, rate
1303/// it. Optional LoserRegs provides a way to declare any formula that refers to
1304/// one of those regs an instant loser.
1305void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
1306 SmallPtrSetImpl<const SCEV *> &Regs,
1307 SmallPtrSetImpl<const SCEV *> *LoserRegs) {
1308 if (LoserRegs && LoserRegs->count(Reg)) {
1309 Lose();
1310 return;
1311 }
1312 if (Regs.insert(Reg).second) {
1313 RateRegister(F, Reg, Regs);
1314 if (LoserRegs && isLoser())
1315 LoserRegs->insert(Reg);
1316 }
1317}
1318
1319void Cost::RateFormula(const Formula &F,
1320 SmallPtrSetImpl<const SCEV *> &Regs,
1321 const DenseSet<const SCEV *> &VisitedRegs,
1322 const LSRUse &LU,
1323 SmallPtrSetImpl<const SCEV *> *LoserRegs) {
1324 assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
1325 // Tally up the registers.
1326 unsigned PrevAddRecCost = C.AddRecCost;
1327 unsigned PrevNumRegs = C.NumRegs;
1328 unsigned PrevNumBaseAdds = C.NumBaseAdds;
1329 if (const SCEV *ScaledReg = F.ScaledReg) {
1330 if (VisitedRegs.count(ScaledReg)) {
1331 Lose();
1332 return;
1333 }
1334 RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs);
1335 if (isLoser())
1336 return;
1337 }
1338 for (const SCEV *BaseReg : F.BaseRegs) {
1339 if (VisitedRegs.count(BaseReg)) {
1340 Lose();
1341 return;
1342 }
1343 RatePrimaryRegister(F, BaseReg, Regs, LoserRegs);
1344 if (isLoser())
1345 return;
1346 }
1347
1348 // Determine how many (unfolded) adds we'll need inside the loop.
1349 size_t NumBaseParts = F.getNumRegs();
1350 if (NumBaseParts > 1)
1351 // Do not count the base and a possible second register if the target
1352 // allows to fold 2 registers.
1353 C.NumBaseAdds +=
1354 NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F)));
1355 C.NumBaseAdds += (F.UnfoldedOffset != 0);
1356
1357 // Accumulate non-free scaling amounts.
1358 C.ScaleCost += getScalingFactorCost(*TTI, LU, F, *L);
1359
1360 // Tally up the non-zero immediates.
1361 for (const LSRFixup &Fixup : LU.Fixups) {
1362 int64_t O = Fixup.Offset;
1363 int64_t Offset = (uint64_t)O + F.BaseOffset;
1364 if (F.BaseGV)
1365 C.ImmCost += 64; // Handle symbolic values conservatively.
1366 // TODO: This should probably be the pointer size.
1367 else if (Offset != 0)
1368 C.ImmCost += APInt(64, Offset, true).getMinSignedBits();
1369
1370 // Check with target if this offset with this instruction is
1371 // specifically not supported.
1372 if (LU.Kind == LSRUse::Address && Offset != 0 &&
1373 !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
1374 Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
1375 C.NumBaseAdds++;
1376 }
1377
1378 // If we don't count instruction cost, exit here.
1379 if (!InsnsCost) {
1380 assert(isValid() && "invalid cost");
1381 return;
1382 }
1383
1384 // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as an
1385 // additional instruction (at least a fill).
1386 unsigned TTIRegNum = TTI->getNumberOfRegisters(false) - 1;
1387 if (C.NumRegs > TTIRegNum) {
1388 // If the cost already exceeded TTIRegNum, then only the newly added
1389 // registers can add new instructions.
1390 if (PrevNumRegs > TTIRegNum)
1391 C.Insns += (C.NumRegs - PrevNumRegs);
1392 else
1393 C.Insns += (C.NumRegs - TTIRegNum);
1394 }
1395
1396 // If an ICmpZero formula does not end in 0, it cannot be replaced by just an
1397 // add or sub. We'll need to compare the final result of the AddRec.
1398 // That means we'll need an additional instruction. But if the target can
1399 // macro-fuse a compare with a branch, don't count this extra instruction.
1400 // For -10 + {0, +, 1}:
1401 // i = i + 1;
1402 // cmp i, 10
1403 //
1404 // For {-10, +, 1}:
1405 // i = i + 1;
1406 if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() &&
1407 !TTI->canMacroFuseCmp())
1408 C.Insns++;
1409 // Each new AddRec adds 1 instruction to the calculation.
1410 C.Insns += (C.AddRecCost - PrevAddRecCost);
1411
1412 // BaseAdds adds instructions for unfolded registers.
1413 if (LU.Kind != LSRUse::ICmpZero)
1414 C.Insns += C.NumBaseAdds - PrevNumBaseAdds;
1415 assert(isValid() && "invalid cost");
1416}
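// Register-pressure example for the code above: with 8 scalar registers,
// TTIRegNum == 7. A formula raising C.NumRegs from 6 to 9 adds 9 - 7 == 2
// instructions (fills); one raising it from 9 to 10 adds only 10 - 9 == 1,
// since the cost beyond TTIRegNum was already paid.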
1417
1418/// Set this cost to a losing value.
1419void Cost::Lose() {
1420 C.Insns = std::numeric_limits<unsigned>::max();
1421 C.NumRegs = std::numeric_limits<unsigned>::max();
1422 C.AddRecCost = std::numeric_limits<unsigned>::max();
1423 C.NumIVMuls = std::numeric_limits<unsigned>::max();
1424 C.NumBaseAdds = std::numeric_limits<unsigned>::max();
1425 C.ImmCost = std::numeric_limits<unsigned>::max();
1426 C.SetupCost = std::numeric_limits<unsigned>::max();
1427 C.ScaleCost = std::numeric_limits<unsigned>::max();
1428}
1429
1430/// Choose the lower cost.
1431bool Cost::isLess(Cost &Other) {
1432 if (InsnsCost.getNumOccurrences() > 0 && InsnsCost &&
1433 C.Insns != Other.C.Insns)
1434 return C.Insns < Other.C.Insns;
1435 return TTI->isLSRCostLess(C, Other.C);
1436}
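When the InsnsCost option (-lsr-insns-cost) is given explicitly and enabled,
the comparison above is lexicographic: instruction counts are compared first
and TTI->isLSRCostLess only breaks ties. A minimal sketch with hypothetical
costs:

  // A.C.Insns == 7, B.C.Insns == 9: A.isLess(B) is true without consulting
  // TTI, because the instruction counts differ.
  // A.C.Insns == 7, B.C.Insns == 7: the decision is delegated to
  // TTI->isLSRCostLess(A.C, B.C) over the remaining cost fields.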
1437
1438#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1439void Cost::print(raw_ostream &OS) const {
1440 if (InsnsCost)
1441 OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
1442 OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
1443 if (C.AddRecCost != 0)
1444 OS << ", with addrec cost " << C.AddRecCost;
1445 if (C.NumIVMuls != 0)
1446 OS << ", plus " << C.NumIVMuls << " IV mul"
1447 << (C.NumIVMuls == 1 ? "" : "s");
1448 if (C.NumBaseAdds != 0)
1449 OS << ", plus " << C.NumBaseAdds << " base add"
1450 << (C.NumBaseAdds == 1 ? "" : "s");
1451 if (C.ScaleCost != 0)
1452 OS << ", plus " << C.ScaleCost << " scale cost";
1453 if (C.ImmCost != 0)
1454 OS << ", plus " << C.ImmCost << " imm cost";
1455 if (C.SetupCost != 0)
1456 OS << ", plus " << C.SetupCost << " setup cost";
1457}
1458
1459 LLVM_DUMP_METHOD void Cost::dump() const {
1460 print(errs()); errs() << '\n';
1461}
1462#endif
1463
1464/// Test whether this fixup always uses its value outside of the given loop.
1465bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
1466 // PHI nodes use their value in their incoming blocks.
1467 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
1468 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1469 if (PN->getIncomingValue(i) == OperandValToReplace &&
1470 L->contains(PN->getIncomingBlock(i)))
1471 return false;
1472 return true;
1473 }
1474
1475 return !L->contains(UserInst);
1476}
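A sketch of the phi special case (names hypothetical):

  // CFG: %latch is inside L, %exit is not.
  //   exit:
  //     %lcssa = phi i64 [ %iv.next, %latch ]
  // isUseFullyOutsideLoop(L) returns false for the %lcssa fixup, because the
  // phi uses %iv.next in its incoming block %latch, which L contains. A
  // non-phi user in a block outside L returns true via the final test.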
1477
1478#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1479void LSRFixup::print(raw_ostream &OS) const {
1480 OS << "UserInst=";
1481 // Store is common and interesting enough to be worth special-casing.
1482 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
1483 OS << "store ";
1484 Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
1485 } else if (UserInst->getType()->isVoidTy())
1486 OS << UserInst->getOpcodeName();
1487 else
1488 UserInst->printAsOperand(OS, /*PrintType=*/false);
1489
1490 OS << ", OperandValToReplace=";
1491 OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);
1492
1493 for (const Loop *PIL : PostIncLoops) {
1494 OS << ", PostIncLoop=";
1495 PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
1496 }
1497
1498 if (Offset != 0)
1499 OS << ", Offset=" << Offset;
1500}
1501
1502 LLVM_DUMP_METHOD void LSRFixup::dump() const {
1503 print(errs()); errs() << '\n';
1504}
1505#endif
1506
1507 /// Test whether this use has a formula with the same registers as the given
1508 /// formula.
1509bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
1510 SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1511 if (F.ScaledReg) Key.push_back(F.ScaledReg);
1512 // Unstable sort by host order ok, because this is only used for uniquifying.
1513 llvm::sort(Key);
1514 return Uniquifier.count(Key);
1515}
1516
1517 /// Return the probability of selecting a formula that does not reference Reg.
1518float LSRUse::getNotSelectedProbability(const SCEV *Reg) const {
1519 unsigned FNum = 0;
1520 for (const Formula &F : Formulae)
1521 if (F.referencesReg(Reg))
1522 FNum++;
1523 return ((float)(Formulae.size() - FNum)) / Formulae.size();
1524}
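A worked example, assuming a use with 8 formulae of which 2 reference Reg:

  // FNum == 2, Formulae.size() == 8
  // getNotSelectedProbability(Reg) == (8 - 2) / 8.0f == 0.75f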
1525
1526/// If the given formula has not yet been inserted, add it to the list, and
1527/// return true. Return false otherwise. The formula must be in canonical form.
1528bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
1529 assert(F.isCanonical(L) && "Invalid canonical representation");
1530
1531 if (!Formulae.empty() && RigidFormula)
1532 return false;
1533
1534 SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1535 if (F.ScaledReg) Key.push_back(F.ScaledReg);
1536 // Unstable sort by host order ok, because this is only used for uniquifying.
1537 llvm::sort(Key);
1538
1539 if (!Uniquifier.insert(Key).second)
1540 return false;
1541
1542 // Using a register to hold the value of 0 is not profitable.
1543 assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
1544        "Zero allocated in a scaled register!");
1545#ifndef NDEBUG
1546 for (const SCEV *BaseReg : F.BaseRegs)
1547 assert(!BaseReg->isZero() && "Zero allocated in a base register!");
1548#endif
1549
1550 // Add the formula to the list.
1551 Formulae.push_back(F);
1552
1553 // Record registers now being used by this use.
1554 Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1555 if (F.ScaledReg)
1556 Regs.insert(F.ScaledReg);
1557
1558 return true;
1559}
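The sorted Key makes the uniquifier insensitive to register order. With
hypothetical SCEVs A and B:

  // F1.BaseRegs == {A, B} and F2.BaseRegs == {B, A} sort to the same Key, so
  // InsertFormula(F1, L) returns true and InsertFormula(F2, L) returns false
  // before F2 ever reaches the Formulae list.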
1560
1561/// Remove the given formula from this use's list.
1562void LSRUse::DeleteFormula(Formula &F) {
1563 if (&F != &Formulae.back())
1564 std::swap(F, Formulae.back());
1565 Formulae.pop_back();
1566}
1567
1568/// Recompute the Regs field, and update RegUses.
1569void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
1570 // Now that we've filtered out some formulae, recompute the Regs set.
1571 SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
1572 Regs.clear();
1573 for (const Formula &F : Formulae) {
1574 if (F.ScaledReg) Regs.insert(F.ScaledReg);
1575 Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1576 }
1577
1578 // Update the RegTracker.
1579 for (const SCEV *S : OldRegs)
1580 if (!Regs.count(S))
1581 RegUses.dropRegister(S, LUIdx);
1582}
1583
1584#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1585void LSRUse::print(raw_ostream &OS) const {
1586 OS << "LSR Use: Kind=";
1587 switch (Kind) {
1588 case Basic: OS << "Basic"; break;
1589 case Special: OS << "Special"; break;
1590 case ICmpZero: OS << "ICmpZero"; break;
1591 case Address:
1592 OS << "Address of ";
1593 if (AccessTy.MemTy->isPointerTy())
1594 OS << "pointer"; // the full pointer type could be really verbose
1595 else {
1596 OS << *AccessTy.MemTy;
1597 }
1598
1599 OS << " in addrspace(" << AccessTy.AddrSpace << ')';
1600 }
1601
1602 OS << ", Offsets={";
1603 bool NeedComma = false;
1604 for (const LSRFixup &Fixup : Fixups) {
1605 if (NeedComma) OS << ',';
1606 OS << Fixup.Offset;
1607 NeedComma = true;
1608 }
1609 OS << '}';
1610
1611 if (AllFixupsOutsideLoop)
1612 OS << ", all-fixups-outside-loop";
1613
1614 if (WidestFixupType)
1615 OS << ", widest fixup type: " << *WidestFixupType;
1616}
1617
1618 LLVM_DUMP_METHOD void LSRUse::dump() const {
1619 print(errs()); errs() << '\n';
1620}
1621#endif
1622
1623static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1624 LSRUse::KindType Kind, MemAccessTy AccessTy,
1625 GlobalValue *BaseGV, int64_t BaseOffset,
1626 bool HasBaseReg, int64_t Scale,
1627 Instruction *Fixup/*= nullptr*/) {
1628 switch (Kind) {
1629 case LSRUse::Address:
1630 return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
1631 HasBaseReg, Scale, AccessTy.AddrSpace, Fixup);
1632
1633 case LSRUse::ICmpZero:
1634 // There's not even a target hook for querying whether it would be legal to
1635 // fold a GV into an ICmp.
1636 if (BaseGV)
1637 return false;
1638
1639 // ICmp only has two operands; don't allow more than two non-trivial parts.
1640 if (Scale != 0 && HasBaseReg && BaseOffset != 0)
1641 return false;
1642
1643 // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
1644 // putting the scaled register in the other operand of the icmp.
1645 if (Scale != 0 && Scale != -1)
1646 return false;
1647
1648 // If we have low-level target information, ask the target if it can fold an
1649 // integer immediate on an icmp.
1650 if (BaseOffset != 0) {
1651 // We have one of:
1652 // ICmpZero BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
1653 // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
1654 // Offs is the ICmp immediate.
1655 if (Scale == 0)
1656 // The cast does the right thing with
1657 // std::numeric_limits<int64_t>::min().
1658 BaseOffset = -(uint64_t)BaseOffset;
1659 return TTI.isLegalICmpImmediate(BaseOffset);
1660 }
1661
1662 // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
1663 return true;
1664
1665 case LSRUse::Basic:
1666 // Only handle single-register values.
1667 return !BaseGV && Scale == 0 && BaseOffset == 0;
1668
1669 case LSRUse::Special:
1670 // Special case Basic to handle -1 scales.
1671 return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
1672 }
1673
1674 llvm_unreachable("Invalid LSRUse Kind!");
1675}
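A worked instance of the ICmpZero immediate handling above, with a
hypothetical offset: for BaseOffset == -10 and Scale == 0, the negation
yields 10 and the query becomes TTI.isLegalICmpImmediate(10):

  // ICmpZero (BaseReg + -10)  ==>  icmp eq BaseReg, 10
  // "BaseReg - 10 == 0" is tested as "BaseReg == 10", so 10 must be a legal
  // icmp immediate on the target.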
1676
1677static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1678 int64_t MinOffset, int64_t MaxOffset,
1679 LSRUse::KindType Kind, MemAccessTy AccessTy,
1680 GlobalValue *BaseGV, int64_t BaseOffset,
1681 bool HasBaseReg, int64_t Scale) {
1682 // Check for overflow.
1683 if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
1684 (MinOffset > 0))
1685 return false;
1686 MinOffset = (uint64_t)BaseOffset + MinOffset;
1687 if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
1688 (MaxOffset > 0))
1689 return false;
1690 MaxOffset = (uint64_t)BaseOffset + MaxOffset;
1691
1692 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
1693 HasBaseReg, Scale) &&
1694 isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
1695 HasBaseReg, Scale);
1696}
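Both guards above reject offset ranges whose endpoints wrap in 64-bit signed
arithmetic. A minimal standalone sketch of the same trick, with a
hypothetical helper name:

  #include <cstdint>

  // True iff BaseOffset + Delta fits in int64_t without signed wrap: after
  // the unsigned add, a wrapped signed sum moves opposite to Delta's sign.
  static bool addFitsInt64(int64_t BaseOffset, int64_t Delta) {
    int64_t Sum = (int64_t)((uint64_t)BaseOffset + Delta);
    return (Sum > BaseOffset) == (Delta > 0);
  }

  // addFitsInt64(INT64_MAX, 1) == false; addFitsInt64(100, -200) == true.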
1697
1698static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1699 int64_t MinOffset, int64_t MaxOffset,
1700 LSRUse::KindType Kind, MemAccessTy AccessTy,
1701 const Formula &F, const Loop &L) {
1702 // For the purpose of isAMCompletelyFolded either having a canonical formula
1703 // or a scale not equal to zero is correct.
1704 // Problems may arise from non-canonical formulae having a scale == 0.
1705 // Strictly speaking, it would be best to just rely on canonical formulae.
1706 // However, when we generate the scaled formulae, we first check that the
1707 // scaling factor is profitable before computing the actual ScaledReg for
1708 // compile time's sake.
1709 assert((F.isCanonical(L) || F.Scale != 0));
1710 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1711 F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
1712}
1713
1714/// Test whether we know how to expand the current formula.
1715static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1716 int64_t MaxOffset, LSRUse::KindType Kind,
1717 MemAccessTy AccessTy, GlobalValue *BaseGV,
1718 int64_t BaseOffset, bool HasBaseReg, int64_t Scale) {
1719 // We know how to expand completely foldable formulae.
1720 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1721 BaseOffset, HasBaseReg, Scale) ||
1722 // Or formulae that use a base register produced by a sum of base
1723 // registers.
1724 (Scale == 1 &&
1725 isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1726 BaseGV, BaseOffset, true, 0));
1727}
1728
1729static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1730 int64_t MaxOffset, LSRUse::KindType Kind,
1731 MemAccessTy AccessTy, const Formula &F) {
1732 return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
1733 F.BaseOffset, F.HasBaseReg, F.Scale);
1734}
1735
1736static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1737 const LSRUse &LU, const Formula &F) {
1738 // Target may want to look at the user instructions.
1739 if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
1740 for (const LSRFixup &Fixup : LU.Fixups)
1741 if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
1742 (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
1743 F.Scale, Fixup.UserInst))
1744 return false;
1745 return true;
1746 }
1747
1748 return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1749 LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
1750 F.Scale);
1751}
1752
1753static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
1754 const LSRUse &LU, const Formula &F,
1755 const Loop &L) {
1756 if (!F.Scale)
1757 return 0;
1758
1759 // If the use is not completely folded in that instruction, we will have to
1760 // pay an extra cost only for scale != 1.
1761 if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1762 LU.AccessTy, F, L))
1763 return F.Scale != 1;
1764
1765 switch (LU.Kind) {
1766 case LSRUse::Address: {
1767 // Check the scaling factor cost with both the min and max offsets.
1768 int ScaleCostMinOffset = TTI.getScalingFactorCost(
1769 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg,
1770 F.Scale, LU.AccessTy.AddrSpace);
1771 int ScaleCostMaxOffset = TTI.getScalingFactorCost(
1772 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg,
1773 F.Scale, LU.AccessTy.AddrSpace);
1774
1775 assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 &&
1776        "Legal addressing mode has an illegal cost!");
1777 return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
1778 }
1779 case LSRUse::ICmpZero:
1780 case LSRUse::Basic:
1781 case LSRUse::Special:
1782 // The use is completely folded, i.e., everything is folded into the
1783 // instruction.
1784 return 0;
1785 }
1786
1787 llvm_unreachable("Invalid LSRUse Kind!");
1788}
1789
1790static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
1791 LSRUse::KindType Kind, MemAccessTy AccessTy,
1792 GlobalValue *BaseGV, int64_t BaseOffset,
1793 bool HasBaseReg) {
1794 // Fast-path: zero is always foldable.
1795 if (BaseOffset == 0 && !BaseGV) return true;
1796
1797 // Conservatively, create an address with an immediate and a
1798 // base and a scale.
1799 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1800
1801 // Canonicalize a scale of 1 to a base register if the formula doesn't
1802 // already have a base register.
1803 if (!HasBaseReg && Scale == 1) {
1804 Scale = 0;
1805 HasBaseReg = true;
1806 }
1807
1808 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
1809 HasBaseReg, Scale);
1810}
1811
1812static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
1813 ScalarEvolution &SE, int64_t MinOffset,
1814 int64_t MaxOffset, LSRUse::KindType Kind,
1815 MemAccessTy AccessTy, const SCEV *S,
1816 bool HasBaseReg) {
1817 // Fast-path: zero is always foldable.
1818 if (S->isZero()) return true;
1819
1820 // Conservatively, create an address with an immediate and a
1821 // base and a scale.
1822 int64_t BaseOffset = ExtractImmediate(S, SE);
1823 GlobalValue *BaseGV = ExtractSymbol(S, SE);
1824
1825 // If there's anything else involved, it's not foldable.
1826 if (!S->isZero()) return false;
1827
1828 // Fast-path: zero is always foldable.
1829 if (BaseOffset == 0 && !BaseGV) return true;
1830
1831 // Conservatively, create an address with an immediate and a
1832 // base and a scale.
1833 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1834
1835 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1836 BaseOffset, HasBaseReg, Scale);
1837}
1838
1839namespace {
1840
1841/// An individual increment in a Chain of IV increments. Relate an IV user to
1842/// an expression that computes the IV it uses from the IV used by the previous
1843/// link in the Chain.
1844///
1845/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
1846/// original IVOperand. The head of the chain's IVOperand is only valid during
1847/// chain collection, before LSR replaces IV users. During chain generation,
1848/// IncExpr can be used to find the new IVOperand that computes the same
1849/// expression.
1850struct IVInc {
1851 Instruction *UserInst;
1852 Value* IVOperand;
1853 const SCEV *IncExpr;
1854
1855 IVInc(Instruction *U, Value *O, const SCEV *E)
1856 : UserInst(U), IVOperand(O), IncExpr(E) {}
1857};
1858
1859// The list of IV increments in program order. We typically add the head of a
1860// chain without finding subsequent links.
1861struct IVChain {
1862 SmallVector<IVInc, 1> Incs;
1863 const SCEV *ExprBase = nullptr;
1864
1865 IVChain() = default;
1866 IVChain(const IVInc &Head, const SCEV *Base)
1867 : Incs(1, Head), ExprBase(Base) {}
1868
1869 using const_iterator = SmallVectorImpl<IVInc>::const_iterator;
1870
1871 // Return the first increment in the chain.
1872 const_iterator begin() const {
1873 assert(!Incs.empty());
1874 return std::next(Incs.begin());
1875 }
1876 const_iterator end() const {
1877 return Incs.end();
1878 }
1879
1880 // Returns true if this chain contains any increments beyond the head.
1881 bool hasIncs() const { return Incs.size() >= 2; }
1882
1883 // Add an IVInc to the end of this chain.
1884 void add(const IVInc &X) { Incs.push_back(X); }
1885
1886 // Returns the last UserInst in the chain.
1887 Instruction *tailUserInst() const { return Incs.back().UserInst; }
1888
1889 // Returns true if IncExpr can be profitably added to this chain.
1890 bool isProfitableIncrement(const SCEV *OperExpr,
1891 const SCEV *IncExpr,
1892 ScalarEvolution&);
1893};
1894
1895/// Helper for CollectChains to track multiple IV increment uses. Distinguish
1896/// between FarUsers that definitely cross IV increments and NearUsers that may
1897/// be used between IV increments.
1898struct ChainUsers {
1899 SmallPtrSet<Instruction*, 4> FarUsers;
1900 SmallPtrSet<Instruction*, 4> NearUsers;
1901};
1902
1903/// This class holds state for the main loop strength reduction logic.
1904class LSRInstance {
1905 IVUsers &IU;
1906 ScalarEvolution &SE;
1907 DominatorTree &DT;
1908 LoopInfo &LI;
1909 const TargetTransformInfo &TTI;
1910 Loop *const L;
1911 bool FavorBackedgeIndex = false;
1912 bool Changed = false;
1913
1914 /// This is the insert position that the current loop's induction variable
1915 /// increment should be placed. In simple loops, this is the latch block's
1916 /// terminator. But in more complicated cases, this is a position which will
1917 /// dominate all the in-loop post-increment users.
1918 Instruction *IVIncInsertPos = nullptr;
1919
1920 /// Interesting factors between use strides.
1921 ///
1922 /// We explicitly use a SetVector which contains a SmallSet, instead of the
1923 /// default, a SmallDenseSet, because we need to use the full range of
1924 /// int64_ts, and there's currently no good way of doing that with
1925 /// SmallDenseSet.
1926 SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;
1927
1928 /// Interesting use types, to facilitate truncation reuse.
1929 SmallSetVector<Type *, 4> Types;
1930
1931 /// The list of interesting uses.
1932 mutable SmallVector<LSRUse, 16> Uses;
1933
1934 /// Track which uses use which register candidates.
1935 RegUseTracker RegUses;
1936
1937 // Limit the number of chains to avoid quadratic behavior. We don't expect to
1938 // have more than a few IV increment chains in a loop. Missing a Chain falls
1939 // back to normal LSR behavior for those uses.
1940 static const unsigned MaxChains = 8;
1941
1942 /// IV users can form a chain of IV increments.
1943 SmallVector<IVChain, MaxChains> IVChainVec;
1944
1945 /// IV users that belong to profitable IVChains.
1946 SmallPtrSet<Use*, MaxChains> IVIncSet;
1947
1948 void OptimizeShadowIV();
1949 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
1950 ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
1951 void OptimizeLoopTermCond();
1952
1953 void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
1954 SmallVectorImpl<ChainUsers> &ChainUsersVec);
1955 void FinalizeChain(IVChain &Chain);
1956 void CollectChains();
1957 void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
1958 SmallVectorImpl<WeakTrackingVH> &DeadInsts);
1959
1960 void CollectInterestingTypesAndFactors();
1961 void CollectFixupsAndInitialFormulae();
1962
1963 // Support for sharing of LSRUses between LSRFixups.
1964 using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>;
1965 UseMapTy UseMap;
1966
1967 bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
1968 LSRUse::KindType Kind, MemAccessTy AccessTy);
1969
1970 std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind,
1971 MemAccessTy AccessTy);
1972
1973 void DeleteUse(LSRUse &LU, size_t LUIdx);
1974
1975 LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
1976
1977 void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
1978 void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
1979 void CountRegisters(const Formula &F, size_t LUIdx);
1980 bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
1981
1982 void CollectLoopInvariantFixupsAndFormulae();
1983
1984 void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
1985 unsigned Depth = 0);
1986
1987 void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
1988 const Formula &Base, unsigned Depth,
1989 size_t Idx, bool IsScaledReg = false);
1990 void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
1991 void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
1992 const Formula &Base, size_t Idx,
1993 bool IsScaledReg = false);
1994 void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
1995 void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
1996 const Formula &Base,
1997 const SmallVectorImpl<int64_t> &Worklist,
1998 size_t Idx, bool IsScaledReg = false);
1999 void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
2000 void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
2001 void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
2002 void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
2003 void GenerateCrossUseConstantOffsets();
2004 void GenerateAllReuseFormulae();
2005
2006 void FilterOutUndesirableDedicatedRegisters();
2007
2008 size_t EstimateSearchSpaceComplexity() const;
2009 void NarrowSearchSpaceByDetectingSupersets();
2010 void NarrowSearchSpaceByCollapsingUnrolledCode();
2011 void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
2012 void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
2013 void NarrowSearchSpaceByDeletingCostlyFormulas();
2014 void NarrowSearchSpaceByPickingWinnerRegs();
2015 void NarrowSearchSpaceUsingHeuristics();
2016
2017 void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
2018 Cost &SolutionCost,
2019 SmallVectorImpl<const Formula *> &Workspace,
2020 const Cost &CurCost,
2021 const SmallPtrSet<const SCEV *, 16> &CurRegs,
2022 DenseSet<const SCEV *> &VisitedRegs) const;
2023 void Solve(SmallVectorImpl<const Formula *> &Solution) const;
2024
2025 BasicBlock::iterator
2026 HoistInsertPosition(BasicBlock::iterator IP,
2027 const SmallVectorImpl<Instruction *> &Inputs) const;
2028 BasicBlock::iterator
2029 AdjustInsertPositionForExpand(BasicBlock::iterator IP,
2030 const LSRFixup &LF,
2031 const LSRUse &LU,
2032 SCEVExpander &Rewriter) const;
2033
2034 Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2035 BasicBlock::iterator IP, SCEVExpander &Rewriter,
2036 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2037 void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
2038 const Formula &F, SCEVExpander &Rewriter,
2039 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2040 void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2041 SCEVExpander &Rewriter,
2042 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2043 void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);
2044
2045public:
2046 LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
2047 LoopInfo &LI, const TargetTransformInfo &TTI);
2048
2049 bool getChanged() const { return Changed; }
2050
2051 void print_factors_and_types(raw_ostream &OS) const;
2052 void print_fixups(raw_ostream &OS) const;
2053 void print_uses(raw_ostream &OS) const;
2054 void print(raw_ostream &OS) const;
2055 void dump() const;
2056};
2057
2058} // end anonymous namespace
2059
2060 /// If IV is used in an int-to-float cast inside the loop, then try to eliminate
2061/// the cast operation.
2062void LSRInstance::OptimizeShadowIV() {
2063 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2064 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2065 return;
2066
2067 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
2068 UI != E; /* empty */) {
2069 IVUsers::const_iterator CandidateUI = UI;
2070 ++UI;
2071 Instruction *ShadowUse = CandidateUI->getUser();
2072 Type *DestTy = nullptr;
2073 bool IsSigned = false;
2074
2075 /* If the shadow use is an int->float cast, then insert a second IV
2076 to eliminate this cast.
2077
2078 for (unsigned i = 0; i < n; ++i)
2079 foo((double)i);
2080
2081 is transformed into
2082
2083 double d = 0.0;
2084 for (unsigned i = 0; i < n; ++i, ++d)
2085 foo(d);
2086 */
2087 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
2088 IsSigned = false;
2089 DestTy = UCast->getDestTy();
2090 }
2091 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
2092 IsSigned = true;
2093 DestTy = SCast->getDestTy();
2094 }
2095 if (!DestTy) continue;
2096
2097 // If target does not support DestTy natively then do not apply
2098 // this transformation.
2099 if (!TTI.isTypeLegal(DestTy)) continue;
2100
2101 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2102 if (!PH) continue;
2103 if (PH->getNumIncomingValues() != 2) continue;
2104
2105 // If the calculation in integers overflows, the result in FP type will
2106 // differ. So we can only do this transformation if we are guaranteed not to
2107 // deal with overflowing values.
2108 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH));
2109 if (!AR) continue;
2110 if (IsSigned && !AR->hasNoSignedWrap()) continue;
2111 if (!IsSigned && !AR->hasNoUnsignedWrap()) continue;
2112
2113 Type *SrcTy = PH->getType();
2114 int Mantissa = DestTy->getFPMantissaWidth();
2115 if (Mantissa == -1) continue;
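// Note: a double has a 53-bit mantissa, so a 64-bit integer IV fails the
// width check below; counts above 2^53 are not exactly representable and
// the shadow FP IV would drift.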
2116 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
2117 continue;
2118
2119 unsigned Entry, Latch;
2120 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2121 Entry = 0;
2122 Latch = 1;
2123 } else {
2124 Entry = 1;
2125 Latch = 0;
2126 }
2127
2128 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2129 if (!Init) continue;
2130 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
2131 (double)Init->getSExtValue() :
2132 (double)Init->getZExtValue());
2133
2134 BinaryOperator *Incr =
2135 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2136 if (!Incr) continue;
2137 if (Incr->getOpcode() != Instruction::Add
2138 && Incr->getOpcode() != Instruction::Sub)
2139 continue;
2140
2141 /* Initialize new IV, double d = 0.0 in above example. */
2142 ConstantInt *C = nullptr;
2143 if (Incr->getOperand(0) == PH)
2144 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2145 else if (Incr->getOperand(1) == PH)
2146 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2147 else
2148 continue;
2149
2150 if (!C) continue;
2151
2152 // Ignore negative constants, as the code below doesn't handle them
2153 // correctly. TODO: Remove this restriction.
2154 if (!C->getValue().isStrictlyPositive()) continue;
2155
2156 /* Add new PHINode. */
2157 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);
2158
2159 /* create new increment. '++d' in above example. */
2160 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2161 BinaryOperator *NewIncr =
2162 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2163 Instruction::FAdd : Instruction::FSub,
2164 NewPH, CFP, "IV.S.next.", Incr);
2165
2166 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2167 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2168
2169 /* Remove cast operation */
2170 ShadowUse->replaceAllUsesWith(NewPH);
2171 ShadowUse->eraseFromParent();
2172 Changed = true;
2173 break;
2174 }
2175}
2176
2177/// If Cond has an operand that is an expression of an IV, set the IV user and
2178/// stride information and return true, otherwise return false.
2179bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
2180 for (IVStrideUse &U : IU)
2181 if (U.getUser() == Cond) {
2182 // NOTE: we could handle setcc instructions with multiple uses here, but
2183 // InstCombine does it as well for simple uses; it's not clear that it
2184 // occurs enough in real life to handle.
2185 CondUse = &U;
2186 return true;
2187 }
2188 return false;
2189}
2190
2191/// Rewrite the loop's terminating condition if it uses a max computation.
2192///
2193/// This is a narrow solution to a specific, but acute, problem. For loops
2194/// like this:
2195///
2196/// i = 0;
2197/// do {
2198/// p[i] = 0.0;
2199/// } while (++i < n);
2200///
2201/// the trip count isn't just 'n', because 'n' might not be positive. And
2202/// unfortunately this can come up even for loops where the user didn't use
2203/// a C do-while loop. For example, seemingly well-behaved top-test loops
2204/// will commonly be lowered like this:
2205///
2206/// if (n > 0) {
2207/// i = 0;
2208/// do {
2209/// p[i] = 0.0;
2210/// } while (++i < n);
2211/// }
2212///
2213/// and then it's possible for subsequent optimization to obscure the if
2214/// test in such a way that indvars can't find it.
2215///
2216/// When indvars can't find the if test in loops like this, it creates a
2217/// max expression, which allows it to give the loop a canonical
2218/// induction variable:
2219///
2220/// i = 0;
2221/// max = n < 1 ? 1 : n;
2222/// do {
2223/// p[i] = 0.0;
2224/// } while (++i != max);
2225///
2226/// Canonical induction variables are necessary because the loop passes
2227/// are designed around them. The most obvious example of this is the
2228/// LoopInfo analysis, which doesn't remember trip count values. It
2229/// expects to be able to rediscover the trip count each time it is
2230/// needed, and it does this using a simple analysis that only succeeds if
2231/// the loop has a canonical induction variable.
2232///
2233/// However, when it comes time to generate code, the maximum operation
2234/// can be quite costly, especially if it's inside of an outer loop.
2235///
2236/// This function solves this problem by detecting this type of loop and
2237/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2238/// the instructions for the maximum computation.
2239ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
2240 // Check that the loop matches the pattern we're looking for.
2241 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2242 Cond->getPredicate() != CmpInst::ICMP_NE)
2243 return Cond;
2244
2245 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2246 if (!Sel || !Sel->hasOneUse()) return Cond;
2247
2248 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2249 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2250 return Cond;
2251 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);
2252
2253 // Add one to the backedge-taken count to get the trip count.
2254 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
2255 if (IterationCount != SE.getSCEV(Sel)) return Cond;
2256
2257 // Check for a max calculation that matches the pattern. There's no check
2258 // for ICMP_ULE here because the comparison would be with zero, which
2259 // isn't interesting.
2260 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
2261 const SCEVNAryExpr *Max = nullptr;
2262 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
2263 Pred = ICmpInst::ICMP_SLE;
2264 Max = S;
2265 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
2266 Pred = ICmpInst::ICMP_SLT;
2267 Max = S;
2268 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
2269 Pred = ICmpInst::ICMP_ULT;
2270 Max = U;
2271 } else {
2272 // No match; bail.
2273 return Cond;
2274 }
2275
2276 // To handle a max with more than two operands, this optimization would
2277 // require additional checking and setup.
2278 if (Max->getNumOperands() != 2)
2279 return Cond;
2280
2281 const SCEV *MaxLHS = Max->getOperand(0);
2282 const SCEV *MaxRHS = Max->getOperand(1);
2283
2284 // ScalarEvolution canonicalizes constants to the left. For < and >, look
2285 // for a comparison with 1. For <= and >=, a comparison with zero.
2286 if (!MaxLHS ||
2287 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
2288 return Cond;
2289
2290 // Check the relevant induction variable for conformance to
2291 // the pattern.
2292 const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
2293 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2294 if (!AR || !AR->isAffine() ||
2295 AR->getStart() != One ||
2296 AR->getStepRecurrence(SE) != One)
2297 return Cond;
2298
2299 assert(AR->getLoop() == L &&
2300        "Loop condition operand is an addrec in a different loop!");
2301
2302 // Check the right operand of the select, and remember it, as it will
2303 // be used in the new comparison instruction.
2304 Value *NewRHS = nullptr;
2305 if (ICmpInst::isTrueWhenEqual(Pred)) {
2306 // Look for n+1, and grab n.
2307 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
2308 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
2309 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
2310 NewRHS = BO->getOperand(0);
2311 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
2312 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
2313 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
2314 NewRHS = BO->getOperand(0);
2315 if (!NewRHS)
2316 return Cond;
2317 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
2318 NewRHS = Sel->getOperand(1);
2319 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
2320 NewRHS = Sel->getOperand(2);
2321 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
2322 NewRHS = SU->getValue();
2323 else
2324 // Max doesn't match expected pattern.
2325 return Cond;
2326
2327 // Determine the new comparison opcode. It may be signed or unsigned,
2328 // and the original comparison may be either equality or inequality.
2329 if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2330 Pred = CmpInst::getInversePredicate(Pred);
2331
2332 // Ok, everything looks ok to change the condition into an SLT or SGE and
2333 // delete the max calculation.
2334 ICmpInst *NewCond =
2335 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2336
2337 // Delete the max calculation instructions.
2338 Cond->replaceAllUsesWith(NewCond);
2339 CondUse->setUser(NewCond);
2340 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2341 Cond->eraseFromParent();
2342 Sel->eraseFromParent();
2343 if (Cmp->use_empty())
2344 Cmp->eraseFromParent();
2345 return NewCond;
2346}
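Concretely, for the example in the comment above (IR names hypothetical), the
rewrite turns

  // %max  = select i1 %cmp, i64 1, i64 %n    ; max = n < 1 ? 1 : n
  // %exit = icmp ne i64 %i.next, %max

into "%exit = icmp slt i64 %i.next, %n", after which the select and, once it
has no other uses, its feeding compare are erased.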
2347
2348/// Change loop terminating condition to use the postinc iv when possible.
2349void
2350LSRInstance::OptimizeLoopTermCond() {
2351 SmallPtrSet<Instruction *, 4> PostIncs;
2352
2353 // We need a different set of heuristics for rotated and non-rotated loops.
2354 // If a loop is rotated then the latch is also the backedge, so inserting
2355 // post-inc expressions just before the latch is ideal. To reduce live ranges
2356 // it also makes sense to rewrite terminating conditions to use post-inc
2357 // expressions.
2358 //
2359 // If the loop is not rotated then the latch is not a backedge; the latch
2360 // check is done in the loop head. Adding post-inc expressions before the
2361 // latch will cause overlapping live-ranges of pre-inc and post-inc expressions
2362 // in the loop body. In this case we do *not* want to use post-inc expressions
2363 // in the latch check, and we want to insert post-inc expressions before
2364 // the backedge.
2365 BasicBlock *LatchBlock = L->getLoopLatch();
2366 SmallVector<BasicBlock*, 8> ExitingBlocks;
2367 L->getExitingBlocks(ExitingBlocks);
2368 if (llvm::all_of(ExitingBlocks, [&LatchBlock](const BasicBlock *BB) {
2369 return LatchBlock != BB;
2370 })) {
2371 // The backedge doesn't exit the loop; treat this as a head-tested loop.
2372 IVIncInsertPos = LatchBlock->getTerminator();
2373 return;
2374 }
2375
2376 // Otherwise treat this as a rotated loop.
2377 for (BasicBlock *ExitingBlock : ExitingBlocks) {
2378 // Get the terminating condition for the loop if possible. If we
2379 // can, we want to change it to use a post-incremented version of its
2380 // induction variable, to allow coalescing the live ranges for the IV into
2381 // one register value.
2382
2383 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2384 if (!TermBr)
2385 continue;
2386 // FIXME: Overly conservative; the termination condition could be an 'or' etc.
2387 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2388 continue;
2389
2390 // Search IVUsesByStride to find Cond's IVUse if there is one.
2391 IVStrideUse *CondUse = nullptr;
2392 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2393 if (!FindIVUserForCond(Cond, CondUse))
2394 continue;
2395
2396 // If the trip count is computed in terms of a max (due to ScalarEvolution
2397 // being unable to find a sufficient guard, for example), change the loop
2398 // comparison to use SLT or ULT instead of NE.
2399 // One consequence of doing this now is that it disrupts the count-down
2400 // optimization. That's not always a bad thing though, because in such
2401 // cases it may still be worthwhile to avoid a max.
2402 Cond = OptimizeMax(Cond, CondUse);
2403
2404 // If this exiting block dominates the latch block, it may also use
2405 // the post-inc value if it won't be shared with other uses.
2406 // Check for dominance.
2407 if (!DT.dominates(ExitingBlock, LatchBlock))
2408 continue;
2409
2410 // Conservatively avoid trying to use the post-inc value in non-latch
2411 // exits if there may be pre-inc users in intervening blocks.
2412 if (LatchBlock != ExitingBlock)
2413 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
2414 // Test if the use is reachable from the exiting block. This dominator
2415 // query is a conservative approximation of reachability.
2416 if (&*UI != CondUse &&
2417 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
2418 // Conservatively assume there may be reuse if the quotient of their
2419 // strides could be a legal scale.
2420 const SCEV *A = IU.getStride(*CondUse, L);
2421 const SCEV *B = IU.getStride(*UI, L);
2422 if (!A || !B) continue;
2423 if (SE.getTypeSizeInBits(A->getType()) !=
2424 SE.getTypeSizeInBits(B->getType())) {
2425 if (SE.getTypeSizeInBits(A->getType()) >
2426 SE.getTypeSizeInBits(B->getType()))
2427 B = SE.getSignExtendExpr(B, A->getType());
2428 else
2429 A = SE.getSignExtendExpr(A, B->getType());
2430 }
2431 if (const SCEVConstant *D =
2432 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
2433 const ConstantInt *C = D->getValue();
2434 // Stride of one or negative one can have reuse with non-addresses.
2435 if (C->isOne() || C->isMinusOne())
2436 goto decline_post_inc;
2437 // Avoid weird situations.
2438 if (C->getValue().getMinSignedBits() >= 64 ||
2439 C->getValue().isMinSignedValue())
2440 goto decline_post_inc;
2441 // Check for possible scaled-address reuse.
2442 if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) {
2443 MemAccessTy AccessTy = getAccessType(
2444 TTI, UI->getUser(), UI->getOperandValToReplace());
2445 int64_t Scale = C->getSExtValue();
2446 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
2447 /*BaseOffset=*/0,
2448 /*HasBaseReg=*/false, Scale,
2449 AccessTy.AddrSpace))
2450 goto decline_post_inc;
2451 Scale = -Scale;
2452 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
2453 /*BaseOffset=*/0,
2454 /*HasBaseReg=*/false, Scale,
2455 AccessTy.AddrSpace))
2456 goto decline_post_inc;
2457 }
2458 }
2459 }
2460
2461 LLVM_DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
2462                   << *Cond << '\n');
2463
2464 // It's possible for the setcc instruction to be anywhere in the loop, and
2465 // possible for it to have multiple users. If it is not immediately before
2466 // the exiting block branch, move it.
2467 if (&*++BasicBlock::iterator(Cond) != TermBr) {
2468 if (Cond->hasOneUse()) {
2469 Cond->moveBefore(TermBr);
2470 } else {
2471 // Clone the terminating condition and insert into the loopend.
2472 ICmpInst *OldCond = Cond;
2473 Cond = cast<ICmpInst>(Cond->clone());
2474 Cond->setName(L->getHeader()->getName() + ".termcond");
2475 ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond);
2476
2477 // Clone the IVUse, as the old use still exists!
2478 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
2479 TermBr->replaceUsesOfWith(OldCond, Cond);
2480 }
2481 }
2482
2483 // If we get to here, we know that we can transform the setcc instruction to
2484 // use the post-incremented version of the IV, allowing us to coalesce the
2485 // live ranges for the IV correctly.
2486 CondUse->transformToPostInc(L);
2487 Changed = true;
2488
2489 PostIncs.insert(Cond);
2490 decline_post_inc:;
2491 }
2492
2493 // Determine an insertion point for the loop induction variable increment. It
2494 // must dominate all the post-inc comparisons we just set up, and it must
2495 // dominate the loop latch edge.
2496 IVIncInsertPos = L->getLoopLatch()->getTerminator();
2497 for (Instruction *Inst : PostIncs) {
2498 BasicBlock *BB =
2499 DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
2500 Inst->getParent());
2501 if (BB == Inst->getParent())
2502 IVIncInsertPos = Inst;
2503 else if (BB != IVIncInsertPos->getParent())
2504 IVIncInsertPos = BB->getTerminator();
2505 }
2506}
2507
2508/// Determine if the given use can accommodate a fixup at the given offset and
2509/// other details. If so, update the use and return true.
2510bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
2511 bool HasBaseReg, LSRUse::KindType Kind,
2512 MemAccessTy AccessTy) {
2513 int64_t NewMinOffset = LU.MinOffset;
2514 int64_t NewMaxOffset = LU.MaxOffset;
2515 MemAccessTy NewAccessTy = AccessTy;
2516
2517 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
2518 // something conservative, however this can pessimize in the case that one of
2519 // the uses will have all its uses outside the loop, for example.
2520 if (LU.Kind != Kind)
2521 return false;
2522
2523 // Check for a mismatched access type, and fall back conservatively as needed.
2524 // TODO: Be less conservative when the type is similar and can use the same
2525 // addressing modes.
2526 if (Kind == LSRUse::Address) {
2527 if (AccessTy.MemTy != LU.AccessTy.MemTy) {
2528 NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
2529 AccessTy.AddrSpace);
2530 }
2531 }
2532
2533 // Conservatively assume HasBaseReg is true for now.
2534 if (NewOffset < LU.MinOffset) {
2535 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2536 LU.MaxOffset - NewOffset, HasBaseReg))
2537 return false;
2538 NewMinOffset = NewOffset;
2539 } else if (NewOffset > LU.MaxOffset) {
2540 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2541 NewOffset - LU.MinOffset, HasBaseReg))
2542 return false;
2543 NewMaxOffset = NewOffset;
2544 }
2545
2546 // Update the use.
2547 LU.MinOffset = NewMinOffset;
2548 LU.MaxOffset = NewMaxOffset;
2549 LU.AccessTy = NewAccessTy;
2550 return true;
2551}
2552
2553/// Return an LSRUse index and an offset value for a fixup which needs the given
2554/// expression, with the given kind and optional access type. Either reuse an
2555/// existing use or create a new one, as needed.
2556std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
2557 LSRUse::KindType Kind,
2558 MemAccessTy AccessTy) {
2559 const SCEV *Copy = Expr;
2560 int64_t Offset = ExtractImmediate(Expr, SE);
2561
2562 // Basic uses can't accept any offset, for example.
2563 if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
2564 Offset, /*HasBaseReg=*/ true)) {
2565 Expr = Copy;
2566 Offset = 0;
2567 }
2568
2569 std::pair<UseMapTy::iterator, bool> P =
2570 UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
2571 if (!P.second) {
2572 // A use already existed with this base.
2573 size_t LUIdx = P.first->second;
2574 LSRUse &LU = Uses[LUIdx];
2575 if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
2576 // Reuse this use.
2577 return std::make_pair(LUIdx, Offset);
2578 }
2579
2580 // Create a new use.
2581 size_t LUIdx = Uses.size();
2582 P.first->second = LUIdx;
2583 Uses.push_back(LSRUse(Kind, AccessTy));
2584 LSRUse &LU = Uses[LUIdx];
2585
2586 LU.MinOffset = Offset;
2587 LU.MaxOffset = Offset;
2588 return std::make_pair(LUIdx, Offset);
2589}
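A trace of the intended reuse (expressions hypothetical, and assuming the
target can fold the resulting offset spread):

  // getUse({%base,+,4} + 8,  Address, Ty)  -> (LUIdx == 0, Offset == 8)
  // getUse({%base,+,4} + 24, Address, Ty)  -> (LUIdx == 0, Offset == 24)
  // ExtractImmediate strips both immediates, the stripped expressions map to
  // the same SCEVUseKindPair, and reconcileNewOffset widens Uses[0] to
  // [MinOffset, MaxOffset] == [8, 24].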
2590
2591/// Delete the given use from the Uses list.
2592void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
2593 if (&LU != &Uses.back())
2594 std::swap(LU, Uses.back());
2595 Uses.pop_back();
2596
2597 // Update RegUses.
2598 RegUses.swapAndDropUse(LUIdx, Uses.size());
2599}
2600
2601 /// Look for a use distinct from OrigLU which has a formula with the same
2602/// registers as the given formula.
2603LSRUse *
2604LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
2605 const LSRUse &OrigLU) {
2606 // Search all uses for the formula. This could be more clever.
2607 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2608 LSRUse &LU = Uses[LUIdx];
2609 // Check whether this use is close enough to OrigLU, to see whether it's
2610 // worthwhile looking through its formulae.
2611 // Ignore ICmpZero uses because they may contain formulae generated by
2612 // GenerateICmpZeroScales, in which case adding fixup offsets may
2613 // be invalid.
2614 if (&LU != &OrigLU &&
2615 LU.Kind != LSRUse::ICmpZero &&
2616 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
2617 LU.WidestFixupType == OrigLU.WidestFixupType &&
2618 LU.HasFormulaWithSameRegs(OrigF)) {
2619 // Scan through this use's formulae.
2620 for (const Formula &F : LU.Formulae) {
2621 // Check to see if this formula has the same registers and symbols
2622 // as OrigF.
2623 if (F.BaseRegs == OrigF.BaseRegs &&
2624 F.ScaledReg == OrigF.ScaledReg &&
2625 F.BaseGV == OrigF.BaseGV &&
2626 F.Scale == OrigF.Scale &&
2627 F.UnfoldedOffset == OrigF.UnfoldedOffset) {
2628 if (F.BaseOffset == 0)
2629 return &LU;
2630 // This is the formula where all the registers and symbols matched;
2631 // there aren't going to be any others. Since we declined it, we
2632 // can skip the rest of the formulae and proceed to the next LSRUse.
2633 break;
2634 }
2635 }
2636 }
2637 }
2638
2639 // Nothing looked good.
2640 return nullptr;
2641}
2642
2643void LSRInstance::CollectInterestingTypesAndFactors() {
2644 SmallSetVector<const SCEV *, 4> Strides;
2645
2646 // Collect interesting types and strides.
2647 SmallVector<const SCEV *, 4> Worklist;
2648 for (const IVStrideUse &U : IU) {
2649 const SCEV *Expr = IU.getExpr(U);
2650
2651 // Collect interesting types.
2652 Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
2653
2654 // Add strides for mentioned loops.
2655 Worklist.push_back(Expr);
2656 do {
2657 const SCEV *S = Worklist.pop_back_val();
2658 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2659 if (AR->getLoop() == L)
2660 Strides.insert(AR->getStepRecurrence(SE));
2661 Worklist.push_back(AR->getStart());
2662 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2663 Worklist.append(Add->op_begin(), Add->op_end());
2664 }
2665 } while (!Worklist.empty());
2666 }
2667
2668 // Compute interesting factors from the set of interesting strides.
2669 for (SmallSetVector<const SCEV *, 4>::const_iterator
2670 I = Strides.begin(), E = Strides.end(); I != E; ++I)
2671 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
2672 std::next(I); NewStrideIter != E; ++NewStrideIter) {
2673 const SCEV *OldStride = *I;
2674 const SCEV *NewStride = *NewStrideIter;
2675
2676 if (SE.getTypeSizeInBits(OldStride->getType()) !=
2677 SE.getTypeSizeInBits(NewStride->getType())) {
2678 if (SE.getTypeSizeInBits(OldStride->getType()) >
2679 SE.getTypeSizeInBits(NewStride->getType()))
2680 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
2681 else
2682 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
2683 }
2684 if (const SCEVConstant *Factor =
2685 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
2686 SE, true))) {
2687 if (Factor->getAPInt().getMinSignedBits() <= 64)
2688 Factors.insert(Factor->getAPInt().getSExtValue());
2689 } else if (const SCEVConstant *Factor =
2690 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
2691 NewStride,
2692 SE, true))) {
2693 if (Factor->getAPInt().getMinSignedBits() <= 64)
2694 Factors.insert(Factor->getAPInt().getSExtValue());
2695 }
2696 }
2697
2698 // If all uses use the same type, don't bother looking for truncation-based
2699 // reuse.
2700 if (Types.size() == 1)
2701 Types.clear();
2702
2703 LLVM_DEBUG(print_factors_and_types(dbgs()));
2704}
2705
2706/// Helper for CollectChains that finds an IV operand (computed by an AddRec in
2707/// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to
2708/// IVStrideUses, we could partially skip this.
2709static User::op_iterator
2710findIVOperand(User::op_iterator OI, User::op_iterator OE,
2711 Loop *L, ScalarEvolution &SE) {
2712 for(; OI != OE; ++OI) {
2713 if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
2714 if (!SE.isSCEVable(Oper->getType()))
2715 continue;
2716
2717 if (const SCEVAddRecExpr *AR =
2718 dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
2719 if (AR->getLoop() == L)
2720 break;
2721 }
2722 }
2723 }
2724 return OI;
2725}
2726
2727/// IVChain logic must consistently peek base TruncInst operands, so wrap it in
2728/// a convenient helper.
2729static Value *getWideOperand(Value *Oper) {
2730 if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
2731 return Trunc->getOperand(0);
2732 return Oper;
2733}
2734
2735/// Return true if we allow an IV chain to include both types.
2736static bool isCompatibleIVType(Value *LVal, Value *RVal) {
2737 Type *LType = LVal->getType();
2738 Type *RType = RVal->getType();
2739 return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() &&
2740 // Different address spaces means (possibly)
2741 // different types of the pointer implementation,
2742 // e.g. i16 vs i32 so disallow that.
2743 (LType->getPointerAddressSpace() ==
2744 RType->getPointerAddressSpace()));
2745}
2746
2747/// Return an approximation of this SCEV expression's "base", or NULL for any
2748/// constant. Returning the expression itself is conservative. Returning a
2749/// deeper subexpression is more precise and valid as long as it isn't less
2750/// complex than another subexpression. For expressions involving multiple
2751/// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids
2752/// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
2753/// IVInc==b-a.
2754///
2755/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
2756/// SCEVUnknown, we simply return the rightmost SCEV operand.
2757static const SCEV *getExprBase(const SCEV *S) {
2758 switch (S->getSCEVType()) {
2759 default: // including scUnknown.
2760 return S;
2761 case scConstant:
2762 return nullptr;
2763 case scTruncate:
2764 return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
2765 case scZeroExtend:
2766 return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
2767 case scSignExtend:
2768 return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
2769 case scAddExpr: {
2770 // Skip over scaled operands (scMulExpr) to follow add operands as long as
2771 // there's nothing more complex.
2772 // FIXME: not sure if we want to recognize negation.
2773 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
2774 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
2775 E(Add->op_begin()); I != E; ++I) {
2776 const SCEV *SubExpr = *I;
2777 if (SubExpr->getSCEVType() == scAddExpr)
2778 return getExprBase(SubExpr);
2779
2780 if (SubExpr->getSCEVType() != scMulExpr)
2781 return SubExpr;
2782 }
2783 return S; // all operands are scaled, be conservative.
2784 }
2785 case scAddRecExpr:
2786 return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
2787 }
2788}
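Some illustrative cases (SCEVs hypothetical):

  // getExprBase(42)              == nullptr    (scConstant)
  // getExprBase({(8 + %a),+,8})  == %a         (the addrec start is the add
  //   (8 + %a); scanning it right-to-left, %a is the rightmost operand that
  //   is neither an add nor a mul)
  // getExprBase((4 * %n))        == (4 * %n)   (scMulExpr hits the default
  //   case and the expression itself is returned, conservatively)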
2789
2790/// Return true if the chain increment is profitable to expand into a loop
2791/// invariant value, which may require its own register. A profitable chain
2792/// increment will be an offset relative to the same base. We allow such offsets
2793 /// to potentially be used as a chain increment as long as it is not obviously
2794/// expensive to expand using real instructions.
2795bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
2796 const SCEV *IncExpr,
2797 ScalarEvolution &SE) {
2798 // Aggressively form chains when -stress-ivchain.
2799 if (StressIVChain)
2800 return true;
2801
2802 // Do not replace a constant offset from IV head with a nonconstant IV
2803 // increment.
2804 if (!isa<SCEVConstant>(IncExpr)) {
2805 const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
2806 if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
2807 return false;
2808 }
2809
2810 SmallPtrSet<const SCEV*, 8> Processed;
2811 return !isHighCostExpansion(IncExpr, Processed, SE);
2812}
2813
2814/// Return true if the number of registers needed for the chain is estimated to
2815/// be less than the number required for the individual IV users. First prohibit
2816/// any IV users that keep the IV live across increments (the Users set should
2817/// be empty). Next count the number and type of increments in the chain.
2818///
2819/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
2820/// effectively use postinc addressing modes. Only consider it profitable if the
2821/// increments can be computed in fewer registers when chained.
2822///
2823/// TODO: Consider IVInc free if it's already used in another chain.
2824static bool
2825isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
2826 ScalarEvolution &SE) {
2827 if (StressIVChain)
2828 return true;
2829
2830 if (!Chain.hasIncs())
2831 return false;
2832
2833 if (!Users.empty()) {
2834    LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
2835               for (Instruction *Inst
2836                    : Users) { dbgs() << "  " << *Inst << "\n"; });
2837 return false;
2838 }
2839  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
2840
2841  // The chain itself may require a register, so initialize cost to 1.
2842 int cost = 1;
2843
2844 // A complete chain likely eliminates the need for keeping the original IV in
2845 // a register. LSR does not currently know how to form a complete chain unless
2846 // the header phi already exists.
2847 if (isa<PHINode>(Chain.tailUserInst())
2848 && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
2849 --cost;
2850 }
2851 const SCEV *LastIncExpr = nullptr;
2852 unsigned NumConstIncrements = 0;
2853 unsigned NumVarIncrements = 0;
2854 unsigned NumReusedIncrements = 0;
2855 for (const IVInc &Inc : Chain) {
2856 if (Inc.IncExpr->isZero())
2857 continue;
2858
2859 // Incrementing by zero or some constant is neutral. We assume constants can
2860 // be folded into an addressing mode or an add's immediate operand.
2861 if (isa<SCEVConstant>(Inc.IncExpr)) {
2862 ++NumConstIncrements;
2863 continue;
2864 }
2865
2866 if (Inc.IncExpr == LastIncExpr)
2867 ++NumReusedIncrements;
2868 else
2869 ++NumVarIncrements;
2870
2871 LastIncExpr = Inc.IncExpr;
2872 }
2873  // An IV chain with a single increment is handled by LSR's postinc
2874  // uses. However, with multiple increments the unchained code keeps the IV's
2875  // value live longer than it would need to be if the increments were chained.
2876 if (NumConstIncrements > 1)
2877 --cost;
2878
2879 // Materializing increment expressions in the preheader that didn't exist in
2880 // the original code may cost a register. For example, sign-extended array
2881 // indices can produce ridiculous increments like this:
2882 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
2883 cost += NumVarIncrements;
2884
2885 // Reusing variable increments likely saves a register to hold the multiple of
2886 // the stride.
2887 cost -= NumReusedIncrements;
2888
2889  LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
2890                    << "\n");
2891
2892 return cost < 0;
2893}
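// Worked cost example (hypothetical chain): cost starts at 1 for the chain's
// own register. If the tail user is the header phi and its SCEV equals the
// head's IncExpr, cost drops to 0. Three constant increments then make
// NumConstIncrements = 3 > 1, dropping cost to -1, so the chain is kept
// (cost < 0). One non-reused variable increment would add 1 back and tip the
// chain into unprofitability.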
2894
2895/// Add this IV user to an existing chain or make it the head of a new chain.
2896void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2897 SmallVectorImpl<ChainUsers> &ChainUsersVec) {
2898 // When IVs are used as types of varying widths, they are generally converted
2899 // to a wider type with some uses remaining narrow under a (free) trunc.
2900 Value *const NextIV = getWideOperand(IVOper);
2901 const SCEV *const OperExpr = SE.getSCEV(NextIV);
2902 const SCEV *const OperExprBase = getExprBase(OperExpr);
2903
2904  // Visit all existing chains. Check if this user's IVOper can be computed as
2905  // a profitable loop-invariant increment from the last link in the Chain.
2906 unsigned ChainIdx = 0, NChains = IVChainVec.size();
2907 const SCEV *LastIncExpr = nullptr;
2908 for (; ChainIdx < NChains; ++ChainIdx) {
2909 IVChain &Chain = IVChainVec[ChainIdx];
2910
2911 // Prune the solution space aggressively by checking that both IV operands
2912 // are expressions that operate on the same unscaled SCEVUnknown. This
2913 // "base" will be canceled by the subsequent getMinusSCEV call. Checking
2914 // first avoids creating extra SCEV expressions.
2915 if (!StressIVChain && Chain.ExprBase != OperExprBase)
2916 continue;
2917
2918 Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
2919 if (!isCompatibleIVType(PrevIV, NextIV))
2920 continue;
2921
2922 // A phi node terminates a chain.
2923 if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
2924 continue;
2925
2926 // The increment must be loop-invariant so it can be kept in a register.
2927 const SCEV *PrevExpr = SE.getSCEV(PrevIV);
2928 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
2929 if (!SE.isLoopInvariant(IncExpr, L))
2930 continue;
2931
2932 if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
2933 LastIncExpr = IncExpr;
2934 break;
2935 }
2936 }
2937 // If we haven't found a chain, create a new one, unless we hit the max. Don't
2938 // bother for phi nodes, because they must be last in the chain.
2939 if (ChainIdx == NChains) {
2940 if (isa<PHINode>(UserInst))
2941 return;
2942 if (NChains >= MaxChains && !StressIVChain) {
2943      LLVM_DEBUG(dbgs() << "IV Chain Limit\n");
2944 return;
2945 }
2946 LastIncExpr = OperExpr;
2947 // IVUsers may have skipped over sign/zero extensions. We don't currently
2948 // attempt to form chains involving extensions unless they can be hoisted
2949 // into this loop's AddRec.
2950 if (!isa<SCEVAddRecExpr>(LastIncExpr))
2951 return;
2952 ++NChains;
2953 IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
2954 OperExprBase));
2955 ChainUsersVec.resize(NChains);
2956    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
2957                      << ") IV=" << *LastIncExpr << "\n");
2958 } else {
2959    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
2960                      << ") IV+" << *LastIncExpr << "\n");
2961 // Add this IV user to the end of the chain.
2962 IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
2963 }
2964 IVChain &Chain = IVChainVec[ChainIdx];
2965
2966 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
2967 // This chain's NearUsers become FarUsers.
2968 if (!LastIncExpr->isZero()) {
2969 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
2970 NearUsers.end());
2971 NearUsers.clear();
2972 }
2973
2974 // All other uses of IVOperand become near uses of the chain.
2975 // We currently ignore intermediate values within SCEV expressions, assuming
2976  // they will eventually be used by the current chain, or can be computed
2977  // from one of the chain increments. To be more precise we could
2978  // transitively follow its users and only add leaf IV users to the set.
2979 for (User *U : IVOper->users()) {
2980 Instruction *OtherUse = dyn_cast<Instruction>(U);
2981 if (!OtherUse)
2982 continue;
2983 // Uses in the chain will no longer be uses if the chain is formed.
2984 // Include the head of the chain in this iteration (not Chain.begin()).
2985 IVChain::const_iterator IncIter = Chain.Incs.begin();
2986 IVChain::const_iterator IncEnd = Chain.Incs.end();
2987 for( ; IncIter != IncEnd; ++IncIter) {
2988 if (IncIter->UserInst == OtherUse)
2989 break;
2990 }
2991 if (IncIter != IncEnd)
2992 continue;
2993
2994 if (SE.isSCEVable(OtherUse->getType())
2995 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
2996 && IU.isIVUserOrOperand(OtherUse)) {
2997 continue;
2998 }
2999 NearUsers.insert(OtherUse);
3000 }
3001
3002 // Since this user is part of the chain, it's no longer considered a use
3003 // of the chain.
3004 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
3005}
3006
3007/// Populate the vector of Chains.
3008///
3009/// This decreases ILP at the architecture level. Targets with ample registers,
3010/// multiple memory ports, and no register renaming probably don't want
3011/// this. However, such targets should probably disable LSR altogether.
3012///
3013/// The job of LSR is to make a reasonable choice of induction variables across
3014/// the loop. Subsequent passes can easily "unchain" computation exposing more
3015/// ILP *within the loop* if the target wants it.
3016///
3017/// Finding the best IV chain is potentially a scheduling problem. Since LSR
3018/// will not reorder memory operations, it will recognize this as a chain, but
3019/// will generate redundant IV increments. Ideally this would be corrected later
3020/// by a smart scheduler:
3021/// = A[i]
3022/// = A[i+x]
3023/// A[i] =
3024/// A[i+x] =
3025///
3026/// TODO: Walk the entire domtree within this loop, not just the path to the
3027/// loop latch. This will discover chains on side paths, but requires
3028/// maintaining multiple copies of the Chains state.
3029void LSRInstance::CollectChains() {
3030  LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
3031 SmallVector<ChainUsers, 8> ChainUsersVec;
3032
3033 SmallVector<BasicBlock *,8> LatchPath;
3034 BasicBlock *LoopHeader = L->getHeader();
3035 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
3036 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
3037 LatchPath.push_back(Rung->getBlock());
3038 }
3039 LatchPath.push_back(LoopHeader);
3040
3041 // Walk the instruction stream from the loop header to the loop latch.
3042 for (BasicBlock *BB : reverse(LatchPath)) {
3043 for (Instruction &I : *BB) {
3044 // Skip instructions that weren't seen by IVUsers analysis.
3045 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
3046 continue;
3047
3048 // Ignore users that are part of a SCEV expression. This way we only
3049 // consider leaf IV Users. This effectively rediscovers a portion of
3050 // IVUsers analysis but in program order this time.
3051 if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
3052 continue;
3053
3054 // Remove this instruction from any NearUsers set it may be in.
3055 for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
3056 ChainIdx < NChains; ++ChainIdx) {
3057 ChainUsersVec[ChainIdx].NearUsers.erase(&I);
3058 }
3059 // Search for operands that can be chained.
3060 SmallPtrSet<Instruction*, 4> UniqueOperands;
3061 User::op_iterator IVOpEnd = I.op_end();
3062 User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
3063 while (IVOpIter != IVOpEnd) {
3064 Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
3065 if (UniqueOperands.insert(IVOpInst).second)
3066 ChainInstruction(&I, IVOpInst, ChainUsersVec);
3067 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3068 }
3069 } // Continue walking down the instructions.
3070 } // Continue walking down the domtree.
3071 // Visit phi backedges to determine if the chain can generate the IV postinc.
3072 for (PHINode &PN : L->getHeader()->phis()) {
3073 if (!SE.isSCEVable(PN.getType()))
3074 continue;
3075
3076 Instruction *IncV =
3077 dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch()));
3078 if (IncV)
3079 ChainInstruction(&PN, IncV, ChainUsersVec);
3080 }
3081 // Remove any unprofitable chains.
3082 unsigned ChainIdx = 0;
3083 for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
3084 UsersIdx < NChains; ++UsersIdx) {
3085 if (!isProfitableChain(IVChainVec[UsersIdx],
3086 ChainUsersVec[UsersIdx].FarUsers, SE))
3087 continue;
3088    // Preserve the chain at UsersIdx.
3089 if (ChainIdx != UsersIdx)
3090 IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
3091 FinalizeChain(IVChainVec[ChainIdx]);
3092 ++ChainIdx;
3093 }
3094 IVChainVec.resize(ChainIdx);
3095}
3096
3097void LSRInstance::FinalizeChain(IVChain &Chain) {
3098  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
3099  LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
3100
3101 for (const IVInc &Inc : Chain) {
3102    LLVM_DEBUG(dbgs() << "        Inc: " << *Inc.UserInst << "\n");
3103 auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
3104    assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
3105 IVIncSet.insert(UseI);
3106 }
3107}
3108
3109/// Return true if the IVInc can be folded into an addressing mode.
3110static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
3111 Value *Operand, const TargetTransformInfo &TTI) {
3112 const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
3113 if (!IncConst || !isAddressUse(TTI, UserInst, Operand))
3114 return false;
3115
3116 if (IncConst->getAPInt().getMinSignedBits() > 64)
3117 return false;
3118
3119 MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
3120 int64_t IncOffset = IncConst->getValue()->getSExtValue();
3121 if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
3122                         IncOffset, /*HasBaseReg=*/false))
3123 return false;
3124
3125 return true;
3126}
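// Illustration: only constant increments feeding address uses can fold. For a
// hypothetical load user with IncExpr = 8, the 8 can usually become an
// addressing-mode displacement, subject to the target's isAlwaysFoldable
// answer; a variable increment, a non-address user, or a constant needing
// more than 64 signed bits all return false here.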
3127
3128/// Generate an add or subtract for each IVInc in a chain to materialize the IV
3129/// user's operand from the previous IV user's operand.
3130void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
3131 SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
3132 // Find the new IVOperand for the head of the chain. It may have been replaced
3133 // by LSR.
3134 const IVInc &Head = Chain.Incs[0];
3135 User::op_iterator IVOpEnd = Head.UserInst->op_end();
3136 // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
3137 User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
3138 IVOpEnd, L, SE);
3139 Value *IVSrc = nullptr;
26
'IVSrc' initialized to a null pointer value
3140 while (IVOpIter != IVOpEnd) {
27
Loop condition is false. Execution continues on line 3157
3141 IVSrc = getWideOperand(*IVOpIter);
3142
3143 // If this operand computes the expression that the chain needs, we may use
3144 // it. (Check this after setting IVSrc which is used below.)
3145 //
3146 // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
3147 // narrow for the chain, so we can no longer use it. We do allow using a
3148 // wider phi, assuming the LSR checked for free truncation. In that case we
3149 // should already have a truncate on this operand such that
3150 // getSCEV(IVSrc) == IncExpr.
3151 if (SE.getSCEV(*IVOpIter) == Head.IncExpr
3152 || SE.getSCEV(IVSrc) == Head.IncExpr) {
3153 break;
3154 }
3155 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3156 }
3157 if (IVOpIter == IVOpEnd) {
28
Assuming 'IVOpIter' is not equal to 'IVOpEnd'
29
Taking false branch
3158 // Gracefully give up on this chain.
3159    LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
3160 return;
3161 }
3162
3163  LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
30
Assuming 'DebugFlag' is not equal to 0
31
Assuming the condition is true
32
Taking true branch
33
Forming reference to null pointer
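Note on this path: step 27 assumes the while loop at line 3140 never executes,
so IVSrc keeps the null value assigned at line 3139; step 28 then assumes
IVOpIter != IVOpEnd at line 3157, so the early return at line 3160 is skipped
and the debug print at line 3163 dereferences *IVSrc, forming the reported
null reference. For a single pair of iterator values these two assumptions
cannot both hold, so the path is likely infeasible in practice, but the
analyzer does not establish that.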
3164 Type *IVTy = IVSrc->getType();
3165 Type *IntTy = SE.getEffectiveSCEVType(IVTy);
3166 const SCEV *LeftOverExpr = nullptr;
3167 for (const IVInc &Inc : Chain) {
3168 Instruction *InsertPt = Inc.UserInst;
3169 if (isa<PHINode>(InsertPt))
3170 InsertPt = L->getLoopLatch()->getTerminator();
3171
3172 // IVOper will replace the current IV User's operand. IVSrc is the IV
3173 // value currently held in a register.
3174 Value *IVOper = IVSrc;
3175 if (!Inc.IncExpr->isZero()) {
3176 // IncExpr was the result of subtraction of two narrow values, so must
3177 // be signed.
3178 const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
3179 LeftOverExpr = LeftOverExpr ?
3180 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
3181 }
3182 if (LeftOverExpr && !LeftOverExpr->isZero()) {
3183 // Expand the IV increment.
3184 Rewriter.clearPostInc();
3185 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
3186 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
3187 SE.getUnknown(IncV));
3188 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
3189
3190 // If an IV increment can't be folded, use it as the next IV value.
3191 if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) {
3192        assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
3193 IVSrc = IVOper;
3194 LeftOverExpr = nullptr;
3195 }
3196 }
3197 Type *OperTy = Inc.IVOperand->getType();
3198 if (IVTy != OperTy) {
3199      assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
3200             "cannot extend a chained IV");
3201 IRBuilder<> Builder(InsertPt);
3202 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
3203 }
3204 Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);
3205 DeadInsts.emplace_back(Inc.IVOperand);
3206 }
3207 // If LSR created a new, wider phi, we may also replace its postinc. We only
3208 // do this if we also found a wide value for the head of the chain.
3209 if (isa<PHINode>(Chain.tailUserInst())) {
3210 for (PHINode &Phi : L->getHeader()->phis()) {
3211 if (!isCompatibleIVType(&Phi, IVSrc))
3212 continue;
3213 Instruction *PostIncV = dyn_cast<Instruction>(
3214 Phi.getIncomingValueForBlock(L->getLoopLatch()));
3215 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
3216 continue;
3217 Value *IVOper = IVSrc;
3218 Type *PostIncTy = PostIncV->getType();
3219 if (IVTy != PostIncTy) {
3220        assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
3221 IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
3222 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
3223 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
3224 }
3225 Phi.replaceUsesOfWith(PostIncV, IVOper);
3226 DeadInsts.emplace_back(PostIncV);
3227 }
3228 }
3229}
3230
3231void LSRInstance::CollectFixupsAndInitialFormulae() {
3232 for (const IVStrideUse &U : IU) {
3233 Instruction *UserInst = U.getUser();
3234 // Skip IV users that are part of profitable IV Chains.
3235 User::op_iterator UseI =
3236 find(UserInst->operands(), U.getOperandValToReplace());
3237    assert(UseI != UserInst->op_end() && "cannot find IV operand");
3238 if (IVIncSet.count(UseI)) {
3239      LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
3240 continue;
3241 }
3242
3243 LSRUse::KindType Kind = LSRUse::Basic;
3244 MemAccessTy AccessTy;
3245 if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) {
3246 Kind = LSRUse::Address;
3247 AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace());
3248 }
3249
3250 const SCEV *S = IU.getExpr(U);
3251 PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops();
3252
3253 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
3254 // (N - i == 0), and this allows (N - i) to be the expression that we work
3255 // with rather than just N or i, so we can consider the register
3256 // requirements for both N and i at the same time. Limiting this code to
3257 // equality icmps is not a problem because all interesting loops use
3258 // equality icmps, thanks to IndVarSimplify.
3259 if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst))
3260 if (CI->isEquality()) {
3261 // Swap the operands if needed to put the OperandValToReplace on the
3262 // left, for consistency.
3263 Value *NV = CI->getOperand(1);
3264 if (NV == U.getOperandValToReplace()) {
3265 CI->setOperand(1, CI->getOperand(0));
3266 CI->setOperand(0, NV);
3267 NV = CI->getOperand(1);
3268 Changed = true;
3269 }
3270
3271 // x == y --> x - y == 0
3272 const SCEV *N = SE.getSCEV(NV);
3273 if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
3274 // S is normalized, so normalize N before folding it into S
3275 // to keep the result normalized.
3276 N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
3277 Kind = LSRUse::ICmpZero;
3278 S = SE.getMinusSCEV(N, S);
3279 }
3280
3281 // -1 and the negations of all interesting strides (except the negation
3282 // of -1) are now also interesting.
3283 for (size_t i = 0, e = Factors.size(); i != e; ++i)
3284 if (Factors[i] != -1)
3285 Factors.insert(-(uint64_t)Factors[i]);
3286 Factors.insert(-1);
3287 }
3288
3289 // Get or create an LSRUse.
3290 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
3291 size_t LUIdx = P.first;
3292 int64_t Offset = P.second;
3293 LSRUse &LU = Uses[LUIdx];
3294
3295 // Record the fixup.
3296 LSRFixup &LF = LU.getNewFixup();
3297 LF.UserInst = UserInst;
3298 LF.OperandValToReplace = U.getOperandValToReplace();
3299 LF.PostIncLoops = TmpPostIncLoops;
3300 LF.Offset = Offset;
3301 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3302
3303 if (!LU.WidestFixupType ||
3304 SE.getTypeSizeInBits(LU.WidestFixupType) <
3305 SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3306 LU.WidestFixupType = LF.OperandValToReplace->getType();
3307
3308 // If this is the first use of this LSRUse, give it a formula.
3309 if (LU.Formulae.empty()) {
3310 InsertInitialFormula(S, LU, LUIdx);
3311 CountRegisters(LU.Formulae.back(), LUIdx);
3312 }
3313 }
3314
3315  LLVM_DEBUG(print_fixups(dbgs()));
3316}
3317
3318/// Insert a formula for the given expression into the given use, separating out
3319/// loop-variant portions from loop-invariant and loop-computable portions.
3320void
3321LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
3322 // Mark uses whose expressions cannot be expanded.
3323 if (!isSafeToExpand(S, SE))
3324 LU.RigidFormula = true;
3325
3326 Formula F;
3327 F.initialMatch(S, L, SE);
3328 bool Inserted = InsertFormula(LU, LUIdx, F);
3329  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
3330}
3331
3332/// Insert a simple single-register formula for the given expression into the
3333/// given use.
3334void
3335LSRInstance::InsertSupplementalFormula(const SCEV *S,
3336 LSRUse &LU, size_t LUIdx) {
3337 Formula F;
3338 F.BaseRegs.push_back(S);
3339 F.HasBaseReg = true;
3340 bool Inserted = InsertFormula(LU, LUIdx, F);
3341  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
3342}
3343
3344/// Note which registers are used by the given formula, updating RegUses.
3345void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
3346 if (F.ScaledReg)
3347 RegUses.countRegister(F.ScaledReg, LUIdx);
3348 for (const SCEV *BaseReg : F.BaseRegs)
3349 RegUses.countRegister(BaseReg, LUIdx);
3350}
3351
3352/// If the given formula has not yet been inserted, add it to the list, and
3353/// return true. Return false otherwise.
3354bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
3355 // Do not insert formula that we will not be able to expand.
3356  assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
3357         "Formula is illegal");
3358
3359 if (!LU.InsertFormula(F, *L))
3360 return false;
3361
3362 CountRegisters(F, LUIdx);
3363 return true;
3364}
3365
3366/// Check for other uses of loop-invariant values which we're tracking. These
3367/// other uses will pin these values in registers, making them less profitable
3368/// for elimination.
3369/// TODO: This currently misses non-constant addrec step registers.
3370/// TODO: Should this give more weight to users inside the loop?
3371void
3372LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
3373 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
3374 SmallPtrSet<const SCEV *, 32> Visited;
3375
3376 while (!Worklist.empty()) {
3377 const SCEV *S = Worklist.pop_back_val();
3378
3379 // Don't process the same SCEV twice
3380 if (!Visited.insert(S).second)
3381 continue;
3382
3383 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
3384 Worklist.append(N->op_begin(), N->op_end());
3385 else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
3386 Worklist.push_back(C->getOperand());
3387 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
3388 Worklist.push_back(D->getLHS());
3389 Worklist.push_back(D->getRHS());
3390 } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
3391 const Value *V = US->getValue();
3392 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
3393 // Look for instructions defined outside the loop.
3394 if (L->contains(Inst)) continue;
3395 } else if (isa<UndefValue>(V))
3396 // Undef doesn't have a live range, so it doesn't matter.
3397 continue;
3398 for (const Use &U : V->uses()) {
3399 const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
3400 // Ignore non-instructions.
3401 if (!UserInst)
3402 continue;
3403 // Ignore instructions in other functions (as can happen with
3404 // Constants).
3405 if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
3406 continue;
3407 // Ignore instructions not dominated by the loop.
3408 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
3409 UserInst->getParent() :
3410 cast<PHINode>(UserInst)->getIncomingBlock(
3411 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3412 if (!DT.dominates(L->getHeader(), UseBB))
3413 continue;
3414 // Don't bother if the instruction is in a BB which ends in an EHPad.
3415 if (UseBB->getTerminator()->isEHPad())
3416 continue;
3417 // Don't bother rewriting PHIs in catchswitch blocks.
3418 if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator()))
3419 continue;
3420 // Ignore uses which are part of other SCEV expressions, to avoid
3421 // analyzing them multiple times.
3422 if (SE.isSCEVable(UserInst->getType())) {
3423 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
3424 // If the user is a no-op, look through to its uses.
3425 if (!isa<SCEVUnknown>(UserS))
3426 continue;
3427 if (UserS == US) {
3428 Worklist.push_back(
3429 SE.getUnknown(const_cast<Instruction *>(UserInst)));
3430 continue;
3431 }
3432 }
3433 // Ignore icmp instructions which are already being analyzed.
3434 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
3435 unsigned OtherIdx = !U.getOperandNo();
3436 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
3437 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
3438 continue;
3439 }
3440
3441 std::pair<size_t, int64_t> P = getUse(
3442 S, LSRUse::Basic, MemAccessTy());
3443 size_t LUIdx = P.first;
3444 int64_t Offset = P.second;
3445 LSRUse &LU = Uses[LUIdx];
3446 LSRFixup &LF = LU.getNewFixup();
3447 LF.UserInst = const_cast<Instruction *>(UserInst);
3448 LF.OperandValToReplace = U;
3449 LF.Offset = Offset;
3450 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3451 if (!LU.WidestFixupType ||
3452 SE.getTypeSizeInBits(LU.WidestFixupType) <
3453 SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3454 LU.WidestFixupType = LF.OperandValToReplace->getType();
3455 InsertSupplementalFormula(US, LU, LUIdx);
3456 CountRegisters(LU.Formulae.back(), Uses.size() - 1);
3457 break;
3458 }
3459 }
3460 }
3461}
3462
3463/// Split S into subexpressions which can be pulled out into separate
3464/// registers. If C is non-null, multiply each subexpression by C.
3465///
3466/// Return remainder expression after factoring the subexpressions captured by
3467/// Ops. If Ops is complete, return NULL.
3468static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
3469 SmallVectorImpl<const SCEV *> &Ops,
3470 const Loop *L,
3471 ScalarEvolution &SE,
3472 unsigned Depth = 0) {
3473 // Arbitrarily cap recursion to protect compile time.
3474 if (Depth >= 3)
3475 return S;
3476
3477 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3478 // Break out add operands.
3479 for (const SCEV *S : Add->operands()) {
3480 const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
3481 if (Remainder)
3482 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3483 }
3484 return nullptr;
3485 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
3486 // Split a non-zero base out of an addrec.
3487 if (AR->getStart()->isZero() || !AR->isAffine())
3488 return S;
3489
3490 const SCEV *Remainder = CollectSubexprs(AR->getStart(),
3491 C, Ops, L, SE, Depth+1);
3492 // Split the non-zero AddRec unless it is part of a nested recurrence that
3493 // does not pertain to this loop.
3494 if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
3495 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3496 Remainder = nullptr;
3497 }
3498 if (Remainder != AR->getStart()) {
3499 if (!Remainder)
3500 Remainder = SE.getConstant(AR->getType(), 0);
3501 return SE.getAddRecExpr(Remainder,
3502 AR->getStepRecurrence(SE),
3503 AR->getLoop(),
3504 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
3505 SCEV::FlagAnyWrap);
3506 }
3507 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3508 // Break (C * (a + b + c)) into C*a + C*b + C*c.
3509 if (Mul->getNumOperands() != 2)
3510 return S;
3511 if (const SCEVConstant *Op0 =
3512 dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3513 C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
3514 const SCEV *Remainder =
3515 CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
3516 if (Remainder)
3517 Ops.push_back(SE.getMulExpr(C, Remainder));
3518 return nullptr;
3519 }
3520 }
3521 return S;
3522}
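// Worked examples (hypothetical SCEVs): CollectSubexprs(%a + %b + 4, nullptr,
// Ops, ...) pushes %a, %b, and 4 onto Ops and returns nullptr (fully
// captured). For (2 * (%a + %b)), the scMulExpr case captures C = 2 and
// recurses, pushing (2 * %a) and (2 * %b). For {%a,+,8}, the non-zero start
// %a is split into Ops and the remainder {0,+,8} is returned.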
3523
3524/// Return true if the SCEV represents a value that may end up as a
3525/// post-increment operation.
3526static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
3527 LSRUse &LU, const SCEV *S, const Loop *L,
3528 ScalarEvolution &SE) {
3529 if (LU.Kind != LSRUse::Address ||
3530 !LU.AccessTy.getType()->isIntOrIntVectorTy())
3531 return false;
3532 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
3533 if (!AR)
3534 return false;
3535 const SCEV *LoopStep = AR->getStepRecurrence(SE);
3536 if (!isa<SCEVConstant>(LoopStep))
3537 return false;
3538 if (LU.AccessTy.getType()->getScalarSizeInBits() !=
3539 LoopStep->getType()->getScalarSizeInBits())
3540 return false;
3541 // Check if a post-indexed load/store can be used.
3542 if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
3543 TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
3544 const SCEV *LoopStart = AR->getStart();
3545 if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
3546 return true;
3547 }
3548 return false;
3549}
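// Illustration: this matches targets with post-indexed memory operations,
// i.e. a load or store that advances its base register by the step as a side
// effect. The access and step widths must agree, and the start must be
// loop-invariant but not a bare constant, before the expression is treated as
// a post-increment candidate.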
3550
3551/// Helper function for LSRInstance::GenerateReassociations.
3552void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
3553 const Formula &Base,
3554 unsigned Depth, size_t Idx,
3555 bool IsScaledReg) {
3556 const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3557 // Don't generate reassociations for the base register of a value that
3558 // may generate a post-increment operator. The reason is that the
3559 // reassociations cause extra base+register formula to be created,
3560 // and possibly chosen, but the post-increment is more efficient.
3561 if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
3562 return;
3563 SmallVector<const SCEV *, 8> AddOps;
3564 const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
3565 if (Remainder)
3566 AddOps.push_back(Remainder);
3567
3568 if (AddOps.size() == 1)
3569 return;
3570
3571 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
3572 JE = AddOps.end();
3573 J != JE; ++J) {
3574 // Loop-variant "unknown" values are uninteresting; we won't be able to
3575 // do anything meaningful with them.
3576 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
3577 continue;
3578
3579 // Don't pull a constant into a register if the constant could be folded
3580 // into an immediate field.
3581 if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3582 LU.AccessTy, *J, Base.getNumRegs() > 1))
3583 continue;
3584
3585 // Collect all operands except *J.
3586 SmallVector<const SCEV *, 8> InnerAddOps(
3587 ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
3588 InnerAddOps.append(std::next(J),
3589 ((const SmallVector<const SCEV *, 8> &)AddOps).end());
3590
3591 // Don't leave just a constant behind in a register if the constant could
3592 // be folded into an immediate field.
3593 if (InnerAddOps.size() == 1 &&
3594 isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3595 LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
3596 continue;
3597
3598 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
3599 if (InnerSum->isZero())
3600 continue;
3601 Formula F = Base;
3602
3603 // Add the remaining pieces of the add back into the new formula.
3604 const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
3605 if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
3606 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3607 InnerSumSC->getValue()->getZExtValue())) {
3608 F.UnfoldedOffset =
3609 (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
3610 if (IsScaledReg)
3611 F.ScaledReg = nullptr;
3612 else
3613 F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
3614 } else if (IsScaledReg)
3615 F.ScaledReg = InnerSum;
3616 else
3617 F.BaseRegs[Idx] = InnerSum;
3618
3619 // Add J as its own register, or an unfolded immediate.
3620 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
3621 if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
3622 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3623 SC->getValue()->getZExtValue()))
3624 F.UnfoldedOffset =
3625 (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
3626 else
3627 F.BaseRegs.push_back(*J);
3628 // We may have changed the number of register in base regs, adjust the
3629 // formula accordingly.
3630 F.canonicalize(*L);
3631
3632 if (InsertFormula(LU, LUIdx, F))
3633 // If that formula hadn't been seen before, recurse to find more like
3634 // it.
3635      // Add Log16(AddOps.size()) (the same as Log2_32(AddOps.size()) >> 2) to
3636      // the depth, because Depth alone is not enough to bound compile time.
3637      // This means that every time AddOps.size() exceeds 16^x we add
3638      // x to Depth.
3639 GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
3640 Depth + 1 + (Log2_32(AddOps.size()) >> 2));
3641 }
3642}
3643
3644/// Split out subexpressions from adds and the bases of addrecs.
3645void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
3646 Formula Base, unsigned Depth) {
3647  assert(Base.isCanonical(*L) && "Input must be in the canonical form");
3648 // Arbitrarily cap recursion to protect compile time.
3649 if (Depth >= 3)
3650 return;
3651
3652 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3653 GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);
3654
3655 if (Base.Scale == 1)
3656 GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
3657 /* Idx */ -1, /* IsScaledReg */ true);
3658}
3659
3660/// Generate a formula consisting of all of the loop-dominating registers added
3661/// into a single register.
3662void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
3663 Formula Base) {
3664 // This method is only interesting on a plurality of registers.
3665 if (Base.BaseRegs.size() + (Base.Scale == 1) +
3666 (Base.UnfoldedOffset != 0) <= 1)
3667 return;
3668
3669 // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
3670 // processing the formula.
3671 Base.unscale();
3672 SmallVector<const SCEV *, 4> Ops;
3673 Formula NewBase = Base;
3674 NewBase.BaseRegs.clear();
3675 Type *CombinedIntegerType = nullptr;
3676 for (const SCEV *BaseReg : Base.BaseRegs) {
3677 if (SE.properlyDominates(BaseReg, L->getHeader()) &&
3678 !SE.hasComputableLoopEvolution(BaseReg, L)) {
3679 if (!CombinedIntegerType)
3680 CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType());
3681 Ops.push_back(BaseReg);
3682 }
3683 else
3684 NewBase.BaseRegs.push_back(BaseReg);
3685 }
3686
3687 // If no register is relevant, we're done.
3688 if (Ops.size() == 0)
3689 return;
3690
3691 // Utility function for generating the required variants of the combined
3692 // registers.
3693 auto GenerateFormula = [&](const SCEV *Sum) {
3694 Formula F = NewBase;
3695
3696 // TODO: If Sum is zero, it probably means ScalarEvolution missed an
3697 // opportunity to fold something. For now, just ignore such cases
3698 // rather than proceed with zero in a register.
3699 if (Sum->isZero())
3700 return;
3701
3702 F.BaseRegs.push_back(Sum);
3703 F.canonicalize(*L);
3704 (void)InsertFormula(LU, LUIdx, F);
3705 };
3706
3707 // If we collected at least two registers, generate a formula combining them.
3708 if (Ops.size() > 1) {
3709 SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
3710 GenerateFormula(SE.getAddExpr(OpsCopy));
3711 }
3712
3713 // If we have an unfolded offset, generate a formula combining it with the
3714 // registers collected.
3715 if (NewBase.UnfoldedOffset) {
3716    assert(CombinedIntegerType && "Missing a type for the unfolded offset");
3717 Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset,
3718 true));
3719 NewBase.UnfoldedOffset = 0;
3720 GenerateFormula(SE.getAddExpr(Ops));
3721 }
3722}
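// Worked example (hypothetical formula): for Base = reg1 + reg2 + reg3 where
// reg2 and reg3 properly dominate the header and have no computable evolution
// in this loop, the combined formula is reg1 + (reg2 + reg3), so the sum can
// live in one register computed outside the loop. A non-zero UnfoldedOffset
// is folded into the sum as a constant operand.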
3723
3724/// Helper function for LSRInstance::GenerateSymbolicOffsets.
3725void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
3726 const Formula &Base, size_t Idx,
3727 bool IsScaledReg) {
3728 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3729 GlobalValue *GV = ExtractSymbol(G, SE);
3730 if (G->isZero() || !GV)
3731 return;
3732 Formula F = Base;
3733 F.BaseGV = GV;
3734 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3735 return;
3736 if (IsScaledReg)
3737 F.ScaledReg = G;
3738 else
3739 F.BaseRegs[Idx] = G;
3740 (void)InsertFormula(LU, LUIdx, F);
3741}
3742
3743/// Generate reuse formulae using symbolic offsets.
3744void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
3745 Formula Base) {
3746 // We can't add a symbolic offset if the address already contains one.
3747 if (Base.BaseGV) return;
3748
3749 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3750 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
3751 if (Base.Scale == 1)
3752 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
3753 /* IsScaledReg */ true);
3754}
3755
3756/// Helper function for LSRInstance::GenerateConstantOffsets.
3757void LSRInstance::GenerateConstantOffsetsImpl(
3758 LSRUse &LU, unsigned LUIdx, const Formula &Base,
3759 const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
3760
3761 auto GenerateOffset = [&](const SCEV *G, int64_t Offset) {
3762 Formula F = Base;
3763 F.BaseOffset = (uint64_t)Base.BaseOffset - Offset;
3764
3765 if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind,
3766 LU.AccessTy, F)) {
3767 // Add the offset to the base register.
3768 const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G);
3769 // If it cancelled out, drop the base register, otherwise update it.
3770 if (NewG->isZero()) {
3771 if (IsScaledReg) {
3772 F.Scale = 0;
3773 F.ScaledReg = nullptr;
3774 } else
3775 F.deleteBaseReg(F.BaseRegs[Idx]);
3776 F.canonicalize(*L);
3777 } else if (IsScaledReg)
3778 F.ScaledReg = NewG;
3779 else
3780 F.BaseRegs[Idx] = NewG;
3781
3782 (void)InsertFormula(LU, LUIdx, F);
3783 }
3784 };
3785
3786 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3787
3788 // With constant offsets and constant steps, we can generate pre-inc
3789 // accesses by having the offset equal the step. So, for access #0 with a
3790 // step of 8, we generate a G - 8 base which would require the first access
3791 // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer
3792  // for itself and hopefully becomes the base for other accesses. This means
3793  // that a single pre-indexed access can be generated to become the new
3794 // base pointer for each iteration of the loop, resulting in no extra add/sub
3795 // instructions for pointer updating.
3796 if (FavorBackedgeIndex && LU.Kind == LSRUse::Address) {
3797 if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
3798 if (auto *StepRec =
3799 dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
3800 const APInt &StepInt = StepRec->getAPInt();
3801 int64_t Step = StepInt.isNegative() ?
3802 StepInt.getSExtValue() : StepInt.getZExtValue();
3803
3804 for (int64_t Offset : Worklist) {
3805 Offset -= Step;
3806 GenerateOffset(G, Offset);
3807 }
3808 }
3809 }
3810 }
3811 for (int64_t Offset : Worklist)
3812 GenerateOffset(G, Offset);
3813
3814 int64_t Imm = ExtractImmediate(G, SE);
3815 if (G->isZero() || Imm == 0)
3816 return;
3817 Formula F = Base;
3818 F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3819 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3820 return;
3821 if (IsScaledReg)
3822 F.ScaledReg = G;
3823 else
3824 F.BaseRegs[Idx] = G;
3825 (void)InsertFormula(LU, LUIdx, F);
3826}
3827
3828/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
3829void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3830 Formula Base) {
3831 // TODO: For now, just add the min and max offset, because it usually isn't
3832  // worthwhile looking at everything in between.
3833 SmallVector<int64_t, 2> Worklist;
3834 Worklist.push_back(LU.MinOffset);
3835 if (LU.MaxOffset != LU.MinOffset)
3836 Worklist.push_back(LU.MaxOffset);
3837
3838 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3839 GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
3840 if (Base.Scale == 1)
3841 GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
3842 /* IsScaledReg */ true);
3843}
3844
3845/// For ICmpZero, check to see if we can scale up the comparison. For example, x
3846/// == y -> x*c == y*c.
3847void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3848 Formula Base) {
3849 if (LU.Kind != LSRUse::ICmpZero) return;
3850
3851 // Determine the integer type for the base formula.
3852 Type *IntTy = Base.getType();
3853 if (!IntTy) return;
3854 if (SE.getTypeSizeInBits(IntTy) > 64) return;
3855
3856 // Don't do this if there is more than one offset.
3857 if (LU.MinOffset != LU.MaxOffset) return;
3858
3859  // Check if the transformation is valid. It is illegal to multiply a pointer.
3860 if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
3861 return;
3862 for (const SCEV *BaseReg : Base.BaseRegs)
3863 if (BaseReg->getType()->isPointerTy())
3864 return;
3865  assert(!Base.BaseGV && "ICmpZero use is not legal!");
3866
3867 // Check each interesting stride.
3868 for (int64_t Factor : Factors) {
3869 // Check that the multiplication doesn't overflow.
3870 if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
3871 continue;
3872 int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
3873 if (NewBaseOffset / Factor != Base.BaseOffset)
3874 continue;
3875 // If the offset will be truncated at this use, check that it is in bounds.
3876 if (!IntTy->isPointerTy() &&
3877 !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
3878 continue;
3879
3880 // Check that multiplying with the use offset doesn't overflow.
3881 int64_t Offset = LU.MinOffset;
3882 if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
3883 continue;
3884 Offset = (uint64_t)Offset * Factor;
3885 if (Offset / Factor != LU.MinOffset)
3886 continue;
3887 // If the offset will be truncated at this use, check that it is in bounds.
3888 if (!IntTy->isPointerTy() &&
3889 !ConstantInt::isValueValidForType(IntTy, Offset))
3890 continue;
3891
3892 Formula F = Base;
3893 F.BaseOffset = NewBaseOffset;
3894
3895 // Check that this scale is legal.
3896 if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
3897 continue;
3898
3899 // Compensate for the use having MinOffset built into it.
3900 F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;
3901
3902 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3903
3904 // Check that multiplying with each base register doesn't overflow.
3905 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
3906 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
3907 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
3908 goto next;
3909 }
3910
3911 // Check that multiplying with the scaled register doesn't overflow.
3912 if (F.ScaledReg) {
3913 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
3914 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
3915 continue;
3916 }
3917
3918 // Check that multiplying with the unfolded offset doesn't overflow.
3919 if (F.UnfoldedOffset != 0) {
3920 if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
3921 Factor == -1)
3922 continue;
3923 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
3924 if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
3925 continue;
3926 // If the offset will be truncated, check that it is in bounds.
3927 if (!IntTy->isPointerTy() &&
3928 !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
3929 continue;
3930 }
3931
3932 // If we make it here and it's legal, add it.
3933 (void)InsertFormula(LU, LUIdx, F);
3934 next:;
3935 }
3936}
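// Worked example (hypothetical use): for an ICmpZero use representing
// (%n - %i) == 0 and an interesting factor of 4, the transform tries
// (4*%n - 4*%i) == 0, multiplying the base offset, the use offset, every base
// register, the scaled register, and any unfolded offset by 4; each overflow
// or legality check above bails out via 'continue' or 'goto next' if the
// product does not divide back exactly.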
3937
3938/// Generate stride-factor reuse formulae by making use of, for example,
3939/// scaled-offset addressing modes.
3940void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
3941 // Determine the integer type for the base formula.
3942 Type *IntTy = Base.getType();
3943 if (!IntTy) return;
3944
3945 // If this Formula already has a scaled register, we can't add another one.
3946 // Try to unscale the formula to generate a better scale.
3947 if (Base.Scale != 0 && !Base.unscale())
3948 return;
3949
3950  assert(Base.Scale == 0 && "unscale did not do its job!");
3951
3952 // Check each interesting stride.
3953 for (int64_t Factor : Factors) {
3954 Base.Scale = Factor;
3955 Base.HasBaseReg = Base.BaseRegs.size() > 1;
3956 // Check whether this scale is going to be legal.
3957 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
3958 Base)) {
3959      // As a special case, handle out-of-loop Basic users specially.
3960 // TODO: Reconsider this special case.
3961 if (LU.Kind == LSRUse::Basic &&
3962 isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
3963 LU.AccessTy, Base) &&
3964 LU.AllFixupsOutsideLoop)
3965 LU.Kind = LSRUse::Special;
3966 else
3967 continue;
3968 }
3969 // For an ICmpZero, negating a solitary base register won't lead to
3970 // new solutions.
3971 if (LU.Kind == LSRUse::ICmpZero &&
3972 !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
3973 continue;
3974 // For each addrec base reg, if its loop is current loop, apply the scale.
3975 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
3976 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]);
3977 if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
3978 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3979 if (FactorS->isZero())
3980 continue;
3981 // Divide out the factor, ignoring high bits, since we'll be
3982 // scaling the value back up in the end.
3983 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
3984 // TODO: This could be optimized to avoid all the copying.
3985 Formula F = Base;
3986 F.ScaledReg = Quotient;
3987 F.deleteBaseReg(F.BaseRegs[i]);
3988 // The canonical representation of 1*reg is reg, which is already in
3989 // Base. In that case, do not try to insert the formula, it will be
3990 // rejected anyway.
3991 if (F.Scale == 1 && (F.BaseRegs.empty() ||
3992 (AR->getLoop() != L && LU.AllFixupsOutsideLoop)))
3993 continue;
3994 // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate
3995 // non canonical Formula with ScaledReg's loop not being L.
3996 if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
3997 F.canonicalize(*L);
3998 (void)InsertFormula(LU, LUIdx, F);
3999 }
4000 }
4001 }
4002 }
4003}
4004
4005/// Generate reuse formulae from different IV types.
4006void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
4007 // Don't bother truncating symbolic values.
4008 if (Base.BaseGV) return;
4009
4010 // Determine the integer type for the base formula.
4011 Type *DstTy = Base.getType();
4012 if (!DstTy) return;
4013 DstTy = SE.getEffectiveSCEVType(DstTy);
4014
4015 for (Type *SrcTy : Types) {
4016 if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
4017 Formula F = Base;
4018
4019      // Sometimes SCEV is able to prove zero during an ext transform. It may
4020      // happen if SCEV did not do all possible transforms while creating the
4021      // initial node (maybe due to depth limitations), but it can do them when
4022      // taking the extension.
4023 if (F.ScaledReg) {
4024 const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
4025 if (NewScaledReg->isZero())
4026 continue;
4027 F.ScaledReg = NewScaledReg;
4028 }
4029 bool HasZeroBaseReg = false;
4030 for (const SCEV *&BaseReg : F.BaseRegs) {
4031 const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);
4032 if (NewBaseReg->isZero()) {
4033 HasZeroBaseReg = true;
4034 break;
4035 }
4036 BaseReg = NewBaseReg;
4037 }
4038 if (HasZeroBaseReg)
4039 continue;
4040
4041 // TODO: This assumes we've done basic processing on all uses and
4042 // have an idea what the register usage is.
4043 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
4044 continue;
4045
4046 F.canonicalize(*L);
4047 (void)InsertFormula(LU, LUIdx, F);
4048 }
4049 }
4050}
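
The reuse GenerateTruncates looks for is easy to see with plain integers: on targets where truncation between the two widths is free, one wide induction variable can serve a narrow use through a trunc instead of a second IV. A standalone sketch (the "typically free" claim is an assumption about the target; the code above checks it via TTI.isTruncateFree):

#include <cassert>
#include <cstdint>

int main() {
  for (int64_t iv = 0; iv != 10; ++iv) {
    // Truncating i64 -> i32 is typically free (a subregister read), so the
    // narrow use can share the wide IV rather than maintaining its own.
    int32_t narrow = static_cast<int32_t>(iv);
    assert(narrow == iv);  // equal while iv stays within i32 range
  }
}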
4051
4052namespace {
4053
4054/// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
4055/// modifications so that the search phase doesn't have to worry about the data
4056/// structures moving underneath it.
4057struct WorkItem {
4058 size_t LUIdx;
4059 int64_t Imm;
4060 const SCEV *OrigReg;
4061
4062 WorkItem(size_t LI, int64_t I, const SCEV *R)
4063 : LUIdx(LI), Imm(I), OrigReg(R) {}
4064
4065 void print(raw_ostream &OS) const;
4066 void dump() const;
4067};
4068
4069} // end anonymous namespace
4070
4071#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4072void WorkItem::print(raw_ostream &OS) const {
4073 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
4074 << " , add offset " << Imm;
4075}
4076
4077 LLVM_DUMP_METHOD void WorkItem::dump() const {
4078 print(errs()); errs() << '\n';
4079}
4080#endif
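
The comment on WorkItem describes a defer-then-apply pattern; here is a minimal standalone sketch of the same idea (hypothetical, simplified types, not this file's API): record planned modifications during a read-only search phase, then apply them afterwards so the containers being searched never move underneath the iterators.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Edit { std::size_t Index; int64_t Delta; };

void process(std::vector<int64_t> &Vals) {
  std::vector<Edit> Work;
  for (std::size_t i = 0; i != Vals.size(); ++i)  // search phase: read-only
    if (Vals[i] % 2 == 0)
      Work.push_back({i, 1});
  for (const Edit &E : Work)                      // apply phase: mutate safely
    Vals[E.Index] += E.Delta;
}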
4081
4082/// Look for registers which are a constant distance apart and try to form reuse
4083/// opportunities between them.
4084void LSRInstance::GenerateCrossUseConstantOffsets() {
4085 // Group the registers by their value without any added constant offset.
4086 using ImmMapTy = std::map<int64_t, const SCEV *>;
4087
4088 DenseMap<const SCEV *, ImmMapTy> Map;
4089 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
4090 SmallVector<const SCEV *, 8> Sequence;
4091 for (const SCEV *Use : RegUses) {
4092 const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
4093 int64_t Imm = ExtractImmediate(Reg, SE);
4094 auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
4095 if (Pair.second)
4096 Sequence.push_back(Reg);
4097 Pair.first->second.insert(std::make_pair(Imm, Use));
4098 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
4099 }
4100
4101 // Now examine each set of registers with the same base value. Build up
4102 // a list of work to do and do the work in a separate step so that we're
4103 // not adding formulae and register counts while we're searching.
4104 SmallVector<WorkItem, 32> WorkItems;
4105 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
4106 for (const SCEV *Reg : Sequence) {
4107 const ImmMapTy &Imms = Map.find(Reg)->second;
4108
4109 // It's not worthwhile looking for reuse if there's only one offset.
4110 if (Imms.size() == 1)
4111 continue;
4112
4113 LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
4114 for (const auto &Entry
4115 : Imms) dbgs()
4116 << ' ' << Entry.first;
4117 dbgs() << '\n');
4118
4119 // Examine each offset.
4120 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
4121 J != JE; ++J) {
4122 const SCEV *OrigReg = J->second;
4123
4124 int64_t JImm = J->first;
4125 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
4126
4127 if (!isa<SCEVConstant>(OrigReg) &&
4128 UsedByIndicesMap[Reg].count() == 1) {
4129 LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
4130 << '\n');
4131 continue;
4132 }
4133
4134 // Conservatively examine offsets between this orig reg and a few selected
4135 // other orig regs.
4136 int64_t First = Imms.begin()->first;
4137 int64_t Last = std::prev(Imms.end())->first;
4138 // Compute (First + Last) / 2 without overflow using the fact that
4139 // First + Last = 2 * (First & Last) + (First ^ Last).
4140 int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
4141 // If the result is negative and First is odd and Last even (or vice versa),
4142 // we rounded towards -inf. Add 1 in that case, to round towards 0.
4143 Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63));
4144 ImmMapTy::const_iterator OtherImms[] = {
4145 Imms.begin(), std::prev(Imms.end()),
4146 Imms.lower_bound(Avg)};
4147 for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
4148 ImmMapTy::const_iterator M = OtherImms[i];
4149 if (M == J || M == JE) continue;
4150
4151 // Compute the difference between the two.
4152 int64_t Imm = (uint64_t)JImm - M->first;
4153 for (unsigned LUIdx : UsedByIndices.set_bits())
4154 // Make a memo of this use, offset, and register tuple.
4155 if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
4156 WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
4157 }
4158 }
4159 }
4160
4161 Map.clear();
4162 Sequence.clear();
4163 UsedByIndicesMap.clear();
4164 UniqueItems.clear();
4165
4166 // Now iterate through the worklist and add new formulae.
4167 for (const WorkItem &WI : WorkItems) {
4168 size_t LUIdx = WI.LUIdx;
4169 LSRUse &LU = Uses[LUIdx];
4170 int64_t Imm = WI.Imm;
4171 const SCEV *OrigReg = WI.OrigReg;
4172
4173 Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
4174 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
4175 unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
4176
4177 // TODO: Use a more targeted data structure.
4178 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
4179 Formula F = LU.Formulae[L];
4180 // FIXME: The code for the scaled and unscaled registers looks
4181 // very similar but slightly different. Investigate if they
4182 // could be merged. That way, we would not have to unscale the
4183 // Formula.
4184 F.unscale();
4185 // Use the immediate in the scaled register.
4186 if (F.ScaledReg == OrigReg) {
4187 int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
4188 // Don't create 50 + reg(-50).
4189 if (F.referencesReg(SE.getSCEV(
4190 ConstantInt::get(IntTy, -(uint64_t)Offset))))
4191 continue;
4192 Formula NewF = F;
4193 NewF.BaseOffset = Offset;
4194 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
4195 NewF))
4196 continue;
4197 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
4198
4199 // If the new scale is a constant in a register, and adding the constant
4200 // value to the immediate would produce a value closer to zero than the
4201 // immediate itself, then the formula isn't worthwhile.
4202 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
4203 if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) &&
4204 (C->getAPInt().abs() * APInt(BitWidth, F.Scale))
4205 .ule(std::abs(NewF.BaseOffset)))
4206 continue;
4207
4208 // OK, looks good.
4209 NewF.canonicalize(*this->L);
4210 (void)InsertFormula(LU, LUIdx, NewF);
4211 } else {
4212 // Use the immediate in a base register.
4213 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
4214 const SCEV *BaseReg = F.BaseRegs[N];
4215 if (BaseReg != OrigReg)
4216 continue;
4217 Formula NewF = F;
4218 NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
4219 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
4220 LU.Kind, LU.AccessTy, NewF)) {
4221 if (TTI.shouldFavorPostInc() &&
4222 mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
4223 continue;
4224 if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
4225 continue;
4226 NewF = F;
4227 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
4228 }
4229 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
4230
4231 // If the new formula has a constant in a register, and adding the
4232 // constant value to the immediate would produce a value closer to
4233 // zero than the immediate itself, then the formula isn't worthwhile.
4234 for (const SCEV *NewReg : NewF.BaseRegs)
4235 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
4236 if ((C->getAPInt() + NewF.BaseOffset)
4237 .abs()
4238 .slt(std::abs(NewF.BaseOffset)) &&
4239 (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >=
4240 countTrailingZeros<uint64_t>(NewF.BaseOffset))
4241 goto skip_formula;
4242
4243 // Ok, looks good.
4244 NewF.canonicalize(*this->L);
4245 (void)InsertFormula(LU, LUIdx, NewF);
4246 break;
4247 skip_formula:;
4248 }
4249 }
4250 }
4251 }
4252}
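
The overflow-free midpoint used above (source lines 4138-4143) is worth seeing in isolation. A compilable sketch with a hypothetical helper name; like the original, it relies on arithmetic right shift of signed values:

#include <cassert>
#include <cstdint>

// Midpoint of First and Last without computing First + Last (which could
// overflow), rounded toward zero. Relies on the identity
//   First + Last == 2 * (First & Last) + (First ^ Last).
int64_t midpointTowardZero(int64_t First, int64_t Last) {
  int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
  // The shift rounds toward -inf; if Avg is negative and First and Last have
  // different parities, add the dropped low bit back to round toward zero.
  Avg += (First ^ Last) & (int64_t)((uint64_t)Avg >> 63);
  return Avg;
}

int main() {
  assert(midpointTowardZero(1, 4) == 2);    // 2.5 rounds toward zero
  assert(midpointTowardZero(-3, 0) == -1);  // -1.5 rounds toward zero
  assert(midpointTowardZero(INT64_MAX, INT64_MAX - 2) == INT64_MAX - 1);
}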
4253
4254/// Generate formulae for each use.
4255void
4256LSRInstance::GenerateAllReuseFormulae() {
4257 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
4258 // queries are more precise.
4259 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4260 LSRUse &LU = Uses[LUIdx];
4261 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4262 GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
4263 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4264 GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
4265 }
4266 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4267 LSRUse &LU = Uses[LUIdx];
4268 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4269 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
4270 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4271 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
4272 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4273 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
4274 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4275 GenerateScales(LU, LUIdx, LU.Formulae[i]);
4276 }
4277 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4278 LSRUse &LU = Uses[LUIdx];
4279 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4280 GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
4281 }
4282
4283 GenerateCrossUseConstantOffsets();
4284
4285 LLVM_DEBUG(dbgs() << "\n"
4286 "After generating reuse formulae:\n";
4287 print_uses(dbgs()));
4288}
4289
4290/// If there are multiple formulae with the same set of registers used
4291/// by other uses, pick the best one and delete the others.
4292void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
4293 DenseSet<const SCEV *> VisitedRegs;
4294 SmallPtrSet<const SCEV *, 16> Regs;
4295 SmallPtrSet<const SCEV *, 16> LoserRegs;
4296#ifndef NDEBUG
4297 bool ChangedFormulae = false;
4298#endif
4299
4300 // Collect the best formula for each unique set of shared registers. This
4301 // is reset for each use.
4302 using BestFormulaeTy =
4303 DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>;
4304
4305 BestFormulaeTy BestFormulae;
4306
4307 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4308 LSRUse &LU = Uses[LUIdx];
4309 LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4310 dbgs() << '\n');
4311
4312 bool Any = false;
4313 for (size_t FIdx = 0, NumForms = LU.Formulae.size();
4314 FIdx != NumForms; ++FIdx) {
4315 Formula &F = LU.Formulae[FIdx];
4316
4317 // Some formulas are instant losers. For example, they may depend on
4318 // nonexistent AddRecs from other loops. These need to be filtered
4319 // immediately, otherwise heuristics could choose them over others leading
4320 // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
4321 // avoids the need to recompute this information across formulae using the
4322 // same bad AddRec. Passing LoserRegs is also essential unless we remove
4323 // the corresponding bad register from the Regs set.
4324 Cost CostF(L, SE, TTI);
4325 Regs.clear();
4326 CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs);
4327 if (CostF.isLoser()) {
4328 // During initial formula generation, undesirable formulae are generated
4329 // by uses within other loops that have some non-trivial address mode or
4330 // use the postinc form of the IV. LSR needs to provide these formulae
4331 // as the basis of rediscovering the desired formula that uses an AddRec
4332 // corresponding to the existing phi. Once all formulae have been
4333 // generated, these initial losers may be pruned.
4334 LLVM_DEBUG(dbgs() << " Filtering loser "; F.print(dbgs());
4335 dbgs() << "\n");
4336 }
4337 else {
4338 SmallVector<const SCEV *, 4> Key;
4339 for (const SCEV *Reg : F.BaseRegs) {
4340 if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
4341 Key.push_back(Reg);
4342 }
4343 if (F.ScaledReg &&
4344 RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
4345 Key.push_back(F.ScaledReg);
4346 // Unstable sort by host order ok, because this is only used for
4347 // uniquifying.
4348 llvm::sort(Key);
4349
4350 std::pair<BestFormulaeTy::const_iterator, bool> P =
4351 BestFormulae.insert(std::make_pair(Key, FIdx));
4352 if (P.second)
4353 continue;
4354
4355 Formula &Best = LU.Formulae[P.first->second];
4356
4357 Cost CostBest(L, SE, TTI);
4358 Regs.clear();
4359 CostBest.RateFormula(Best, Regs, VisitedRegs, LU);
4360 if (CostF.isLess(CostBest))
4361 std::swap(F, Best);
4362 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
4363 dbgs() << "\n"
4364 " in favor of formula ";
4365 Best.print(dbgs()); dbgs() << '\n');
4366 }
4367#ifndef NDEBUG
4368 ChangedFormulae = true;
4369#endif
4370 LU.DeleteFormula(F);
4371 --FIdx;
4372 --NumForms;
4373 Any = true;
4374 }
4375
4376 // Now that we've filtered out some formulae, recompute the Regs set.
4377 if (Any)
4378 LU.RecomputeRegs(LUIdx, RegUses);
4379
4380 // Reset this to prepare for the next use.
4381 BestFormulae.clear();
4382 }
4383
4384 LLVM_DEBUG(if (ChangedFormulae) {
4385 dbgs() << "\n"
4386 "After filtering out undesirable candidates:\n";
4387 print_uses(dbgs());
4388 });
4389}
4390
4391/// Estimate the worst-case number of solutions the solver might have to
4392/// consider. It almost never considers this many solutions because it prunes
4393/// the search space, but the pruning isn't always sufficient.
4394size_t LSRInstance::EstimateSearchSpaceComplexity() const {
4395 size_t Power = 1;
4396 for (const LSRUse &LU : Uses) {
4397 size_t FSize = LU.Formulae.size();
4398 if (FSize >= ComplexityLimit) {
4399 Power = ComplexityLimit;
4400 break;
4401 }
4402 Power *= FSize;
4403 if (Power >= ComplexityLimit)
4404 break;
4405 }
4406 return Power;
4407}
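
A standalone sketch of the saturating product above (hypothetical helper): the running product is clamped as soon as it reaches the limit, so a single use with a huge formula count cannot overflow the estimate.

#include <cstddef>
#include <vector>

std::size_t estimateComplexity(const std::vector<std::size_t> &FormulaCounts,
                               std::size_t Limit) {
  std::size_t Power = 1;
  for (std::size_t FSize : FormulaCounts) {
    if (FSize >= Limit)
      return Limit;  // one use alone already saturates the estimate
    Power *= FSize;
    if (Power >= Limit)
      break;         // stop multiplying; callers only compare against Limit
  }
  return Power;
}

// For example, counts {3, 4, 5} yield 60; a single use with 100000 formulae
// and a limit of 65536 saturates to 65536 at once.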
4408
4409/// When one formula uses a superset of the registers of another formula, it
4410/// won't help reduce register pressure (though it may not necessarily hurt
4411/// register pressure); remove it to simplify the system.
4412void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
4413 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4414 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4415
4416 LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
4417 "which use a superset of registers used by other "
4418 "formulae.\n");
4419
4420 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4421 LSRUse &LU = Uses[LUIdx];
4422 bool Any = false;
4423 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4424 Formula &F = LU.Formulae[i];
4425 // Look for a formula with a constant or GV in a register. If the use
4426 // also has a formula with that same value in an immediate field,
4427 // delete the one that uses a register.
4428 for (SmallVectorImpl<const SCEV *>::const_iterator
4429 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
4430 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
4431 Formula NewF = F;
4432 //FIXME: Formulas should store bitwidth to do wrapping properly.
4433 // See PR41034.
4434 NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue();
4435 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
4436 (I - F.BaseRegs.begin()));
4437 if (LU.HasFormulaWithSameRegs(NewF)) {
4438 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs());
4439 dbgs() << '\n');
4440 LU.DeleteFormula(F);
4441 --i;
4442 --e;
4443 Any = true;
4444 break;
4445 }
4446 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
4447 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
4448 if (!F.BaseGV) {
4449 Formula NewF = F;
4450 NewF.BaseGV = GV;
4451 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
4452 (I - F.BaseRegs.begin()));
4453 if (LU.HasFormulaWithSameRegs(NewF)) {
4454 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs());
4455 dbgs() << '\n');
4456 LU.DeleteFormula(F);
4457 --i;
4458 --e;
4459 Any = true;
4460 break;
4461 }
4462 }
4463 }
4464 }
4465 }
4466 if (Any)
4467 LU.RecomputeRegs(LUIdx, RegUses);
4468 }
4469
4470 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4471 }
4472}
4473
4474/// When there are many registers for expressions like A, A+1, A+2, etc.,
4475/// allocate a single register for them.
4476void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
4477 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4478 return;
4479
4480 LLVM_DEBUG(
4481 dbgs() << "The search space is too complex.\n"
4482 "Narrowing the search space by assuming that uses separated "
4483 "by a constant offset will use the same registers.\n");
4484
4485 // This is especially useful for unrolled loops.
4486
4487 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4488 LSRUse &LU = Uses[LUIdx];
4489 for (const Formula &F : LU.Formulae) {
4490 if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
4491 continue;
4492
4493 LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
4494 if (!LUThatHas)
4495 continue;
4496
4497 if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
4498 LU.Kind, LU.AccessTy))
4499 continue;
4500
4501 LLVM_DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n');
4502
4503 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
4504
4505 // Transfer the fixups of LU to LUThatHas.
4506 for (LSRFixup &Fixup : LU.Fixups) {
4507 Fixup.Offset += F.BaseOffset;
4508 LUThatHas->pushFixup(Fixup);
4509 LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
4510 }
4511
4512 // Delete formulae from the new use which are no longer legal.
4513 bool Any = false;
4514 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
4515 Formula &F = LUThatHas->Formulae[i];
4516 if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
4517 LUThatHas->Kind, LUThatHas->AccessTy, F)) {
4518 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
4519 LUThatHas->DeleteFormula(F);
4520 --i;
4521 --e;
4522 Any = true;
4523 }
4524 }
4525
4526 if (Any)
4527 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
4528
4529 // Delete the old use.
4530 DeleteUse(LU, LUIdx);
4531 --LUIdx;
4532 --NumUses;
4533 break;
4534 }
4535 }
4536
4537 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4538}
4539
4540/// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
4541/// we've done more filtering, as it may be able to find more formulae to
4542/// eliminate.
4543void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
4544 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4545 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4546
4547 LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
4548 "undesirable dedicated registers.\n");
4549
4550 FilterOutUndesirableDedicatedRegisters();
4551
4552 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4553 }
4554}
4555
4556/// If an LSRUse has multiple formulae with the same ScaledReg and Scale,
4557/// pick the best one and delete the others.
4558/// This narrowing heuristic keeps as many formulae with different
4559/// Scale and ScaledReg pairs as possible while narrowing the search space.
4560/// The benefit is that a formula set with more Scale and ScaledReg
4561/// variations is more likely to yield a better solution than one in which
4562/// they are all the same. The winner-reg-picking heuristic often keeps
4563/// the formulae with the same Scale and ScaledReg while filtering out the
4564/// others, and we want to avoid that if possible.
4565void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
4566 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4567 return;
4568
4569 LLVM_DEBUG(
4570 dbgs() << "The search space is too complex.\n"
4571 "Narrowing the search space by choosing the best Formula "
4572 "from the Formulae with the same Scale and ScaledReg.\n");
4573
4574 // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
4575 using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;
4576
4577 BestFormulaeTy BestFormulae;
4578#ifndef NDEBUG
4579 bool ChangedFormulae = false;
4580#endif
4581 DenseSet<const SCEV *> VisitedRegs;
4582 SmallPtrSet<const SCEV *, 16> Regs;
4583
4584 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4585 LSRUse &LU = Uses[LUIdx];
4586 LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4587 dbgs() << '\n');
4588
4589 // Return true if Formula FA is better than Formula FB.
4590 auto IsBetterThan = [&](Formula &FA, Formula &FB) {
4591 // First we will try to choose the Formula with fewer new registers.
4592 // For a register used by current Formula, the more the register is
4593 // shared among LSRUses, the less we increase the register number
4594 // counter of the formula.
4595 size_t FARegNum = 0;
4596 for (const SCEV *Reg : FA.BaseRegs) {
4597 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
4598 FARegNum += (NumUses - UsedByIndices.count() + 1);
4599 }
4600 size_t FBRegNum = 0;
4601 for (const SCEV *Reg : FB.BaseRegs) {
4602 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
4603 FBRegNum += (NumUses - UsedByIndices.count() + 1);
4604 }
4605 if (FARegNum != FBRegNum)
4606 return FARegNum < FBRegNum;
4607
4608 // If the new register numbers are the same, choose the Formula with
4609 // less Cost.
4610 Cost CostFA(L, SE, TTI);
4611 Cost CostFB(L, SE, TTI);
4612 Regs.clear();
4613 CostFA.RateFormula(FA, Regs, VisitedRegs, LU);
4614 Regs.clear();
4615 CostFB.RateFormula(FB, Regs, VisitedRegs, LU);
4616 return CostFA.isLess(CostFB);
4617 };
4618
4619 bool Any = false;
4620 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
4621 ++FIdx) {
4622 Formula &F = LU.Formulae[FIdx];
4623 if (!F.ScaledReg)
4624 continue;
4625 auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
4626 if (P.second)
4627 continue;
4628
4629 Formula &Best = LU.Formulae[P.first->second];
4630 if (IsBetterThan(F, Best))
4631 std::swap(F, Best);
4632 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
4633 dbgs() << "\n"
4634 " in favor of formula ";
4635 Best.print(dbgs()); dbgs() << '\n');
4636#ifndef NDEBUG
4637 ChangedFormulae = true;
4638#endif
4639 LU.DeleteFormula(F);
4640 --FIdx;
4641 --NumForms;
4642 Any = true;
4643 }
4644 if (Any)
4645 LU.RecomputeRegs(LUIdx, RegUses);
4646
4647 // Reset this to prepare for the next use.
4648 BestFormulae.clear();
4649 }
4650
4651 LLVM_DEBUG(if (ChangedFormulae) {
4652 dbgs() << "\n"
4653 "After filtering out undesirable candidates:\n";
4654 print_uses(dbgs());
4655 });
4656}
4657
4658/// This function deletes formulas with a high expected register count.
4659/// Assuming we don't know the value of each formula (all inefficient ones
4660/// have already been deleted), compute for each register the probability
4661/// of it not being selected.
4662/// For example,
4663/// Use1:
4664/// reg(a) + reg({0,+,1})
4665/// reg(a) + reg({-1,+,1}) + 1
4666/// reg({a,+,1})
4667/// Use2:
4668/// reg(b) + reg({0,+,1})
4669/// reg(b) + reg({-1,+,1}) + 1
4670/// reg({b,+,1})
4671/// Use3:
4672/// reg(c) + reg(b) + reg({0,+,1})
4673/// reg(c) + reg({b,+,1})
4674///
4675/// Probability of not selecting
4676/// Use1 Use2 Use3
4677/// reg(a) (1/3) * 1 * 1
4678/// reg(b) 1 * (1/3) * (1/2)
4679/// reg({0,+,1}) (2/3) * (2/3) * (1/2)
4680/// reg({-1,+,1}) (2/3) * (2/3) * 1
4681/// reg({a,+,1}) (2/3) * 1 * 1
4682/// reg({b,+,1}) 1 * (2/3) * (2/3)
4683/// reg(c) 1 * 1 * 0
4684///
4685/// Now compute the expected register count for each formula.
4686/// Note that for each use we exclude the probability of not selecting
4687/// for that use. For example, for Use1 the probability for reg(a) is just
4688/// 1 * 1 (excluding the probability 1/3 of not selecting for Use1).
4689/// Use1:
4690/// reg(a) + reg({0,+,1}) 1 + 1/3 -- to be deleted
4691/// reg(a) + reg({-1,+,1}) + 1 1 + 4/9 -- to be deleted
4692/// reg({a,+,1}) 1
4693/// Use2:
4694/// reg(b) + reg({0,+,1}) 1/2 + 1/3 -- to be deleted
4695/// reg(b) + reg({-1,+,1}) + 1 1/2 + 2/3 -- to be deleted
4696/// reg({b,+,1}) 2/3
4697/// Use3:
4698/// reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
4699/// reg(c) + reg({b,+,1}) 1 + 2/3
4700void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
4701 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4702 return;
4703 // Ok, we have too many formulae on our hands to conveniently handle.
4704 // Use a rough heuristic to thin out the list.
4705
4706 // Set of Regs which will be 100% used in the final solution.
4707 // Used in each formula of a solution (in example above this is reg(c)).
4708 // We can skip them in calculations.
4709 SmallPtrSet<const SCEV *, 4> UniqRegs;
4710 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4711
4712 // Map each register to its probability of not being selected.
4713 DenseMap <const SCEV *, float> RegNumMap;
4714 for (const SCEV *Reg : RegUses) {
4715 if (UniqRegs.count(Reg))
4716 continue;
4717 float PNotSel = 1;
4718 for (const LSRUse &LU : Uses) {
4719 if (!LU.Regs.count(Reg))
4720 continue;
4721 float P = LU.getNotSelectedProbability(Reg);
4722 if (P != 0.0)
4723 PNotSel *= P;
4724 else
4725 UniqRegs.insert(Reg);
4726 }
4727 RegNumMap.insert(std::make_pair(Reg, PNotSel));
4728 }
4729
4730 LLVM_DEBUG(
4731 dbgs() << "Narrowing the search space by deleting costly formulas\n");
4732
4733 // Delete formulas whose expected register count is high.
4734 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4735 LSRUse &LU = Uses[LUIdx];
4736 // If there is nothing to delete, continue.
4737 if (LU.Formulae.size() < 2)
4738 continue;
4739 // This is a temporary solution to test performance. Float should be
4740 // replaced with a rounding-independent type (based on integers) to avoid
4741 // different results for different target builds.
4742 float FMinRegNum = LU.Formulae[0].getNumRegs();
4743 float FMinARegNum = LU.Formulae[0].getNumRegs();
4744 size_t MinIdx = 0;
4745 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4746 Formula &F = LU.Formulae[i];
4747 float FRegNum = 0;
4748 float FARegNum = 0;
4749 for (const SCEV *BaseReg : F.BaseRegs) {
4750 if (UniqRegs.count(BaseReg))
4751 continue;
4752 FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
4753 if (isa<SCEVAddRecExpr>(BaseReg))
4754 FARegNum +=
4755 RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
4756 }
4757 if (const SCEV *ScaledReg = F.ScaledReg) {
4758 if (!UniqRegs.count(ScaledReg)) {
4759 FRegNum +=
4760 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
4761 if (isa<SCEVAddRecExpr>(ScaledReg))
4762 FARegNum +=
4763 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
4764 }
4765 }
4766 if (FMinRegNum > FRegNum ||
4767 (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
4768 FMinRegNum = FRegNum;
4769 FMinARegNum = FARegNum;
4770 MinIdx = i;
4771 }
4772 }
4773 LLVM_DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs());
4774 dbgs() << " with min reg num " << FMinRegNum << '\n');
4775 if (MinIdx != 0)
4776 std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
4777 while (LU.Formulae.size() != 1) {
4778 LLVM_DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs());
4779 dbgs() << '\n');
4780 LU.Formulae.pop_back();
4781 }
4782 LU.RecomputeRegs(LUIdx, RegUses);
4783 assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
4784 Formula &F = LU.Formulae[0];
4785 LLVM_DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n');
4786 // When we choose the formula, the regs become unique.
4787 UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
4788 if (F.ScaledReg)
4789 UniqRegs.insert(F.ScaledReg);
4790 }
4791 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4792}
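
A standalone sketch of the probability bookkeeping the doc comment above works through (hypothetical helper; the real code reads LU.getNotSelectedProbability): for one use, the probability of not selecting a register is (formulae not containing it) / (total formulae), and multiplying across uses gives PNotSel.

#include <cassert>

float notSelectedForUse(unsigned FormulaeWithReg, unsigned TotalFormulae) {
  return float(TotalFormulae - FormulaeWithReg) / float(TotalFormulae);
}

int main() {
  // reg(b) from the example: in 2 of 3 formulae of Use2, 1 of 2 of Use3,
  // and absent from Use1 (factor 1), so PNotSel = 1 * (1/3) * (1/2) = 1/6.
  float PNotSel = 1.0f * notSelectedForUse(2, 3) * notSelectedForUse(1, 2);
  assert(PNotSel > 0.166f && PNotSel < 0.167f);
}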
4793
4794/// Pick a register which seems likely to be profitable, and then in any use
4795/// which has any reference to that register, delete all formulae which do not
4796/// reference that register.
4797void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
4798 // With all other options exhausted, loop until the system is simple
4799 // enough to handle.
4800 SmallPtrSet<const SCEV *, 4> Taken;
4801 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4802 // Ok, we have too many formulae on our hands to conveniently handle.
4803 // Use a rough heuristic to thin out the list.
4804 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4805
4806 // Pick the register which is used by the most LSRUses, which is likely
4807 // to be a good reuse register candidate.
4808 const SCEV *Best = nullptr;
4809 unsigned BestNum = 0;
4810 for (const SCEV *Reg : RegUses) {
4811 if (Taken.count(Reg))
4812 continue;
4813 if (!Best) {
4814 Best = Reg;
4815 BestNum = RegUses.getUsedByIndices(Reg).count();
4816 } else {
4817 unsigned Count = RegUses.getUsedByIndices(Reg).count();
4818 if (Count > BestNum) {
4819 Best = Reg;
4820 BestNum = Count;
4821 }
4822 }
4823 }
4824
4825 LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
4826 << " will yield profitable reuse.\n");
4827 Taken.insert(Best);
4828
4829 // In any use with formulae which references this register, delete formulae
4830 // which don't reference it.
4831 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4832 LSRUse &LU = Uses[LUIdx];
4833 if (!LU.Regs.count(Best)) continue;
4834
4835 bool Any = false;
4836 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4837 Formula &F = LU.Formulae[i];
4838 if (!F.referencesReg(Best)) {
4839 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
4840 LU.DeleteFormula(F);
4841 --e;
4842 --i;
4843 Any = true;
4844 assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
4845 continue;
4846 }
4847 }
4848
4849 if (Any)
4850 LU.RecomputeRegs(LUIdx, RegUses);
4851 }
4852
4853 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4854 }
4855}
4856
4857/// If there are an extraordinary number of formulae to choose from, use some
4858/// rough heuristics to prune down the number of formulae. This keeps the main
4859/// solver from taking an extraordinary amount of time in some worst-case
4860/// scenarios.
4861void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
4862 NarrowSearchSpaceByDetectingSupersets();
4863 NarrowSearchSpaceByCollapsingUnrolledCode();
4864 NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
4865 if (FilterSameScaledReg)
4866 NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
4867 if (LSRExpNarrow)
4868 NarrowSearchSpaceByDeletingCostlyFormulas();
4869 else
4870 NarrowSearchSpaceByPickingWinnerRegs();
4871}
4872
4873/// This is the recursive solver.
4874void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
4875 Cost &SolutionCost,
4876 SmallVectorImpl<const Formula *> &Workspace,
4877 const Cost &CurCost,
4878 const SmallPtrSet<const SCEV *, 16> &CurRegs,
4879 DenseSet<const SCEV *> &VisitedRegs) const {
4880 // Some ideas:
4881 // - prune more:
4882 // - use more aggressive filtering
4883 // - sort the formula so that the most profitable solutions are found first
4884 // - sort the uses too
4885 // - search faster:
4886 // - don't compute a cost, and then compare. compare while computing a cost
4887 // and bail early.
4888 // - track register sets with SmallBitVector
4889
4890 const LSRUse &LU = Uses[Workspace.size()];
4891
4892 // If this use references any register that's already a part of the
4893 // in-progress solution, consider it a requirement that a formula must
4894 // reference that register in order to be considered. This prunes out
4895 // unprofitable searching.
4896 SmallSetVector<const SCEV *, 4> ReqRegs;
4897 for (const SCEV *S : CurRegs)
4898 if (LU.Regs.count(S))
4899 ReqRegs.insert(S);
4900
4901 SmallPtrSet<const SCEV *, 16> NewRegs;
4902 Cost NewCost(L, SE, TTI);
4903 for (const Formula &F : LU.Formulae) {
4904 // Ignore formulae which may not be ideal in terms of register reuse of
4905 // ReqRegs. The formula should use all required registers before
4906 // introducing new ones.
4907 int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
4908 for (const SCEV *Reg : ReqRegs) {
4909 if ((F.ScaledReg && F.ScaledReg == Reg) ||
4910 is_contained(F.BaseRegs, Reg)) {
4911 --NumReqRegsToFind;
4912 if (NumReqRegsToFind == 0)
4913 break;
4914 }
4915 }
4916 if (NumReqRegsToFind != 0) {
4917 // If none of the formulae satisfied the required registers, then we could
4918 // clear ReqRegs and try again. Currently, we simply give up in this case.
4919 continue;
4920 }
4921
4922 // Evaluate the cost of the current formula. If it's already worse than
4923 // the current best, prune the search at that point.
4924 NewCost = CurCost;
4925 NewRegs = CurRegs;
4926 NewCost.RateFormula(F, NewRegs, VisitedRegs, LU);
4927 if (NewCost.isLess(SolutionCost)) {
4928 Workspace.push_back(&F);
4929 if (Workspace.size() != Uses.size()) {
4930 SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
4931 NewRegs, VisitedRegs);
4932 if (F.getNumRegs() == 1 && Workspace.size() == 1)
4933 VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
4934 } else {
4935 LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
4936 dbgs() << ".\nRegs:\n";
4937 for (const SCEV *S : NewRegs) dbgs()
4938 << "- " << *S << "\n";
4939 dbgs() << '\n');
4940
4941 SolutionCost = NewCost;
4942 Solution = Workspace;
4943 }
4944 Workspace.pop_back();
4945 }
4946 }
4947}
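
SolveRecurse is a branch-and-bound search; a minimal standalone skeleton of the same shape (hypothetical, simplified types: costs are plain ints, and the required-register pruning is omitted):

#include <cstddef>
#include <vector>

void solve(std::size_t Slot, const std::vector<std::vector<int>> &Choices,
           int CurCost, std::vector<int> &Work,
           int &BestCost, std::vector<int> &Best) {
  if (Slot == Choices.size()) {
    BestCost = CurCost;  // reached only when CurCost beat the incumbent below
    Best = Work;
    return;
  }
  for (int C : Choices[Slot]) {
    if (CurCost + C >= BestCost)
      continue;          // prune: this branch cannot beat the best so far
    Work.push_back(C);
    solve(Slot + 1, Choices, CurCost + C, Work, BestCost, Best);
    Work.pop_back();
  }
}

// The caller seeds BestCost with a sentinel "infinite" cost, just as Solve
// calls SolutionCost.Lose() before starting the recursion.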
4948
4949/// Choose one formula from each use. Return the results in the given Solution
4950/// vector.
4951void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
4952 SmallVector<const Formula *, 8> Workspace;
4953 Cost SolutionCost(L, SE, TTI);
4954 SolutionCost.Lose();
4955 Cost CurCost(L, SE, TTI);
4956 SmallPtrSet<const SCEV *, 16> CurRegs;
4957 DenseSet<const SCEV *> VisitedRegs;
4958 Workspace.reserve(Uses.size());
4959
4960 // SolveRecurse does all the work.
4961 SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
4962 CurRegs, VisitedRegs);
4963 if (Solution.empty()) {
4964 LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
4965 return;
4966 }
4967
4968 // Ok, we've now made all our decisions.
4969 LLVM_DEBUG(dbgs() << "\n"
4970 "The chosen solution requires ";
4971 SolutionCost.print(dbgs()); dbgs() << ":\n";
4972 for (size_t i = 0, e = Uses.size(); i != e; ++i) {
4973 dbgs() << " ";
4974 Uses[i].print(dbgs());
4975 dbgs() << "\n"
4976 " ";
4977 Solution[i]->print(dbgs());
4978 dbgs() << '\n';
4979 });
4980
4981 assert(Solution.size() == Uses.size() && "Malformed solution!");
4982}
4983
4984/// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as far
4985/// as we can go while still being dominated by the input positions. This helps
4986/// canonicalize the insert position, which encourages sharing.
4987BasicBlock::iterator
4988LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
4989 const SmallVectorImpl<Instruction *> &Inputs)
4990 const {
4991 Instruction *Tentative = &*IP;
4992 while (true) {
4993 bool AllDominate = true;
4994 Instruction *BetterPos = nullptr;
4995 // Don't bother attempting to insert before a catchswitch; its basic block
4996 // cannot contain other non-PHI instructions.
4997 if (isa<CatchSwitchInst>(Tentative))
4998 return IP;
4999
5000 for (Instruction *Inst : Inputs) {
5001 if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
5002 AllDominate = false;
5003 break;
5004 }
5005 // Attempt to find an insert position in the middle of the block,
5006 // instead of at the end, so that it can be used for other expansions.
5007 if (Tentative->getParent() == Inst->getParent() &&
5008 (!BetterPos || !DT.dominates(Inst, BetterPos)))
5009 BetterPos = &*std::next(BasicBlock::iterator(Inst));
5010 }
5011 if (!AllDominate)
5012 break;
5013 if (BetterPos)
5014 IP = BetterPos->getIterator();
5015 else
5016 IP = Tentative->getIterator();
5017
5018 const Loop *IPLoop = LI.getLoopFor(IP->getParent());
5019 unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
5020
5021 BasicBlock *IDom;
5022 for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
5023 if (!Rung) return IP;
5024 Rung = Rung->getIDom();
5025 if (!Rung) return IP;
5026 IDom = Rung->getBlock();
5027
5028 // Don't climb into a loop though.
5029 const Loop *IDomLoop = LI.getLoopFor(IDom);
5030 unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
5031 if (IDomDepth <= IPLoopDepth &&
5032 (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
5033 break;
5034 }
5035
5036 Tentative = IDom->getTerminator();
5037 }
5038
5039 return IP;
5040}
5041
5042/// Determine an input position which will be dominated by the operands and
5043/// which will dominate the result.
5044BasicBlock::iterator
5045LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
5046 const LSRFixup &LF,
5047 const LSRUse &LU,
5048 SCEVExpander &Rewriter) const {
5049 // Collect some instructions which must be dominated by the
5050 // expanding replacement. These must be dominated by any operands that
5051 // will be required in the expansion.
5052 SmallVector<Instruction *, 4> Inputs;
5053 if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
5054 Inputs.push_back(I);
5055 if (LU.Kind == LSRUse::ICmpZero)
5056 if (Instruction *I =
5057 dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
5058 Inputs.push_back(I);
5059 if (LF.PostIncLoops.count(L)) {
5060 if (LF.isUseFullyOutsideLoop(L))
5061 Inputs.push_back(L->getLoopLatch()->getTerminator());
5062 else
5063 Inputs.push_back(IVIncInsertPos);
5064 }
5065  // The expansion must also be dominated by the increment positions of any
5066  // loops for which it is using post-inc mode.
5067 for (const Loop *PIL : LF.PostIncLoops) {
5068 if (PIL == L) continue;
5069
5070 // Be dominated by the loop exit.
5071 SmallVector<BasicBlock *, 4> ExitingBlocks;
5072 PIL->getExitingBlocks(ExitingBlocks);
5073 if (!ExitingBlocks.empty()) {
5074 BasicBlock *BB = ExitingBlocks[0];
5075 for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
5076 BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
5077 Inputs.push_back(BB->getTerminator());
5078 }
5079 }
5080
5081  assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
5082         && !isa<DbgInfoIntrinsic>(LowestIP) &&
5083         "Insertion point must be a normal instruction");
5084
5085 // Then, climb up the immediate dominator tree as far as we can go while
5086 // still being dominated by the input positions.
5087 BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);
5088
5089 // Don't insert instructions before PHI nodes.
5090 while (isa<PHINode>(IP)) ++IP;
5091
5092 // Ignore landingpad instructions.
5093 while (IP->isEHPad()) ++IP;
5094
5095 // Ignore debug intrinsics.
5096 while (isa<DbgInfoIntrinsic>(IP)) ++IP;
5097
5098 // Set IP below instructions recently inserted by SCEVExpander. This keeps the
5099 // IP consistent across expansions and allows the previously inserted
5100 // instructions to be reused by subsequent expansion.
5101 while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
5102 ++IP;
5103
5104 return IP;
5105}
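
The exit-block handling above repeatedly folds pairs of blocks through findNearestCommonDominator until a single block dominating every exiting block remains. A self-contained sketch of that fold on a toy dominator tree, assuming nodes carry a parent link and a depth (illustrative only, not the DominatorTree API):

#include <cstddef>
#include <vector>

struct Node { Node *IDom = nullptr; unsigned Depth = 0; };

// Classic nearest-common-ancestor walk on a (dominator) tree.
static Node *nca(Node *A, Node *B) {
  while (A->Depth > B->Depth) A = A->IDom;
  while (B->Depth > A->Depth) B = B->IDom;
  while (A != B) { A = A->IDom; B = B->IDom; }
  return A;
}

// Fold a non-empty set of exiting blocks down to one block dominating them
// all, mirroring the findNearestCommonDominator loop above.
static Node *commonDominator(const std::vector<Node *> &Blocks) {
  Node *BB = Blocks.front();
  for (std::size_t i = 1, e = Blocks.size(); i != e; ++i)
    BB = nca(BB, Blocks[i]);
  return BB;
}
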
5106
5107/// Emit instructions for the leading candidate expression for this LSRUse (this
5108/// is called "expanding").
5109Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
5110 const Formula &F, BasicBlock::iterator IP,
5111 SCEVExpander &Rewriter,
5112 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5113 if (LU.RigidFormula)
5114 return LF.OperandValToReplace;
5115
5116 // Determine an input position which will be dominated by the operands and
5117 // which will dominate the result.
5118 IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
5119 Rewriter.setInsertPoint(&*IP);
5120
5121 // Inform the Rewriter if we have a post-increment use, so that it can
5122 // perform an advantageous expansion.
5123 Rewriter.setPostInc(LF.PostIncLoops);
5124
5125 // This is the type that the user actually needs.
5126 Type *OpTy = LF.OperandValToReplace->getType();
5127 // This will be the type that we'll initially expand to.
5128 Type *Ty = F.getType();
5129 if (!Ty)
5130 // No type known; just expand directly to the ultimate type.
5131 Ty = OpTy;
5132 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
5133 // Expand directly to the ultimate type if it's the right size.
5134 Ty = OpTy;
5135 // This is the type to do integer arithmetic in.
5136 Type *IntTy = SE.getEffectiveSCEVType(Ty);
5137
5138 // Build up a list of operands to add together to form the full base.
5139 SmallVector<const SCEV *, 8> Ops;
5140
5141 // Expand the BaseRegs portion.
5142 for (const SCEV *Reg : F.BaseRegs) {
5143    assert(!Reg->isZero() && "Zero allocated in a base register!");
5144
5145 // If we're expanding for a post-inc user, make the post-inc adjustment.
5146 Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE);
5147 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr)));
5148 }
5149
5150 // Expand the ScaledReg portion.
5151 Value *ICmpScaledV = nullptr;
5152 if (F.Scale != 0) {
5153 const SCEV *ScaledS = F.ScaledReg;
5154
5155 // If we're expanding for a post-inc user, make the post-inc adjustment.
5156 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
5157 ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE);
5158
5159 if (LU.Kind == LSRUse::ICmpZero) {
5160      // Expand ScaledReg as if it were part of the base regs.
5161 if (F.Scale == 1)
5162 Ops.push_back(
5163 SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)));
5164 else {
5165 // An interesting way of "folding" with an icmp is to use a negated
5166 // scale, which we'll implement by inserting it into the other operand
5167 // of the icmp.
5168        assert(F.Scale == -1 &&
5169               "The only scale supported by ICmpZero uses is -1!");
5170 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
5171 }
5172 } else {
5173 // Otherwise just expand the scaled register and an explicit scale,
5174 // which is expected to be matched as part of the address.
5175
5176      // Flush the operand list to suppress SCEVExpander hoisting address modes,
5177      // unless the addressing mode will not be folded.
5178 if (!Ops.empty() && LU.Kind == LSRUse::Address &&
5179 isAMCompletelyFolded(TTI, LU, F)) {
5180 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
5181 Ops.clear();
5182 Ops.push_back(SE.getUnknown(FullV));
5183 }
5184 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr));
5185 if (F.Scale != 1)
5186 ScaledS =
5187 SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
5188 Ops.push_back(ScaledS);
5189 }
5190 }
5191
5192 // Expand the GV portion.
5193 if (F.BaseGV) {
5194 // Flush the operand list to suppress SCEVExpander hoisting.
5195 if (!Ops.empty()) {
5196 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
5197 Ops.clear();
5198 Ops.push_back(SE.getUnknown(FullV));
5199 }
5200 Ops.push_back(SE.getUnknown(F.BaseGV));
5201 }
5202
5203 // Flush the operand list to suppress SCEVExpander hoisting of both folded and
5204 // unfolded offsets. LSR assumes they both live next to their uses.
5205 if (!Ops.empty()) {
5206 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
5207 Ops.clear();
5208 Ops.push_back(SE.getUnknown(FullV));
5209 }
5210
5211 // Expand the immediate portion.
5212 int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
5213 if (Offset != 0) {
5214 if (LU.Kind == LSRUse::ICmpZero) {
5215 // The other interesting way of "folding" with an ICmpZero is to use a
5216 // negated immediate.
5217 if (!ICmpScaledV)
5218 ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
5219 else {
5220 Ops.push_back(SE.getUnknown(ICmpScaledV));
5221 ICmpScaledV = ConstantInt::get(IntTy, Offset);
5222 }
5223 } else {
5224 // Just add the immediate values. These again are expected to be matched
5225 // as part of the address.
5226 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
5227 }
5228 }
5229
5230 // Expand the unfolded offset portion.
5231 int64_t UnfoldedOffset = F.UnfoldedOffset;
5232 if (UnfoldedOffset != 0) {
5233 // Just add the immediate values.
5234 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
5235 UnfoldedOffset)));
5236 }
5237
5238 // Emit instructions summing all the operands.
5239 const SCEV *FullS = Ops.empty() ?
5240 SE.getConstant(IntTy, 0) :
5241 SE.getAddExpr(Ops);
5242 Value *FullV = Rewriter.expandCodeFor(FullS, Ty);
5243
5244 // We're done expanding now, so reset the rewriter.
5245 Rewriter.clearPostInc();
5246
5247 // An ICmpZero Formula represents an ICmp which we're handling as a
5248 // comparison against zero. Now that we've expanded an expression for that
5249 // form, update the ICmp's other operand.
5250 if (LU.Kind == LSRUse::ICmpZero) {
5251 ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
5252 DeadInsts.emplace_back(CI->getOperand(1));
5253    assert(!F.BaseGV && "ICmp does not support folding a global value and "
5254                        "a scale at the same time!");
5255 if (F.Scale == -1) {
5256 if (ICmpScaledV->getType() != OpTy) {
5257 Instruction *Cast =
5258 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
5259 OpTy, false),
5260 ICmpScaledV, OpTy, "tmp", CI);
5261 ICmpScaledV = Cast;
5262 }
5263 CI->setOperand(1, ICmpScaledV);
5264 } else {
5265 // A scale of 1 means that the scale has been expanded as part of the
5266 // base regs.
5267      assert((F.Scale == 0 || F.Scale == 1) &&
5268             "ICmp does not support folding a global value and "
5269             "a scale at the same time!");
5270 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
5271 -(uint64_t)Offset);
5272 if (C->getType() != OpTy)
5273 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
5274 OpTy, false),
5275 C, OpTy);
5276
5277 CI->setOperand(1, C);
5278 }
5279 }
5280
5281 return FullV;
5282}
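
The two ICmpZero "folding" tricks in Expand are easier to see on plain integers: rather than materializing a negated term and comparing the whole sum against zero, the negated piece moves to the other side of the comparison. A hedged scalar illustration (toy arithmetic, not LSR's actual data flow):

#include <cassert>
#include <cstdint>

int main() {
  int64_t Rest = 7, Y = 7;   // Rest + (-1 * Y), compared against zero.
  // Scale of -1: (Rest - Y == 0) is handled as (Rest == Y); Y is installed
  // directly as the other icmp operand instead of being negated and added.
  assert(((Rest - Y) == 0) == (Rest == Y));

  int64_t X = -5, Off = 5;   // X + Off, compared against zero.
  // Negated immediate: (X + Off == 0) is handled as (X == -Off).
  assert(((X + Off) == 0) == (X == -Off));
  return 0;
}
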
5283
5284/// Helper for Rewrite. PHI nodes are special because the use of their operands
5285/// effectively happens in their predecessor blocks, so the expression may need
5286/// to be expanded in multiple places.
5287void LSRInstance::RewriteForPHI(
5288 PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
5289 SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5290 DenseMap<BasicBlock *, Value *> Inserted;
5291 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5292 if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
5293 bool needUpdateFixups = false;
5294 BasicBlock *BB = PN->getIncomingBlock(i);
5295
5296 // If this is a critical edge, split the edge so that we do not insert
5297 // the code on all predecessor/successor paths. We do this unless this
5298 // is the canonical backedge for this loop, which complicates post-inc
5299 // users.
5300 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
5301 !isa<IndirectBrInst>(BB->getTerminator()) &&
5302 !isa<CatchSwitchInst>(BB->getTerminator())) {
5303 BasicBlock *Parent = PN->getParent();
5304 Loop *PNLoop = LI.getLoopFor(Parent);
5305 if (!PNLoop || Parent != PNLoop->getHeader()) {
5306 // Split the critical edge.
5307 BasicBlock *NewBB = nullptr;
5308 if (!Parent->isLandingPad()) {
5309 NewBB = SplitCriticalEdge(BB, Parent,
5310 CriticalEdgeSplittingOptions(&DT, &LI)
5311 .setMergeIdenticalEdges()
5312 .setKeepOneInputPHIs());
5313 } else {
5314 SmallVector<BasicBlock*, 2> NewBBs;
5315 SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI);
5316 NewBB = NewBBs[0];
5317 }
5318 // If NewBB==NULL, then SplitCriticalEdge refused to split because all
5319 // phi predecessors are identical. The simple thing to do is skip
5320 // splitting in this case rather than complicate the API.
5321 if (NewBB) {
5322 // If PN is outside of the loop and BB is in the loop, we want to
5323 // move the block to be immediately before the PHI block, not
5324 // immediately after BB.
5325 if (L->contains(BB) && !L->contains(PN))
5326 NewBB->moveBefore(PN->getParent());
5327
5328 // Splitting the edge can reduce the number of PHI entries we have.
5329 e = PN->getNumIncomingValues();
5330 BB = NewBB;
5331 i = PN->getBasicBlockIndex(BB);
5332
5333 needUpdateFixups = true;
5334 }
5335 }
5336 }
5337
5338 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
5339 Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
5340 if (!Pair.second)
5341 PN->setIncomingValue(i, Pair.first->second);
5342 else {
5343 Value *FullV = Expand(LU, LF, F, BB->getTerminator()->getIterator(),
5344 Rewriter, DeadInsts);
5345
5346 // If this is reuse-by-noop-cast, insert the noop cast.
5347 Type *OpTy = LF.OperandValToReplace->getType();
5348 if (FullV->getType() != OpTy)
5349 FullV =
5350 CastInst::Create(CastInst::getCastOpcode(FullV, false,
5351 OpTy, false),
5352 FullV, LF.OperandValToReplace->getType(),
5353 "tmp", BB->getTerminator());
5354
5355 PN->setIncomingValue(i, FullV);
5356 Pair.first->second = FullV;
5357 }
5358
5359        // If LSR splits a critical edge and the phi node has other pending
5360        // fixup operands, we need to update those pending fixups; otherwise
5361        // the formulae will not be implemented completely and some
5362        // instructions will not be eliminated.
5363 if (needUpdateFixups) {
5364 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
5365 for (LSRFixup &Fixup : Uses[LUIdx].Fixups)
5366          // If a fixup is supposed to rewrite some operand in the phi that
5367          // was just updated, that operand may already have been moved to
5368          // another phi node; such a fixup requires an update.
5369 if (Fixup.UserInst == PN) {
5370 // Check if the operand we try to replace still exists in the
5371 // original phi.
5372 bool foundInOriginalPHI = false;
5373 for (const auto &val : PN->incoming_values())
5374 if (val == Fixup.OperandValToReplace) {
5375 foundInOriginalPHI = true;
5376 break;
5377 }
5378
5379              // If the fixup operand was found in the original PHI, there is nothing to do.
5380 if (foundInOriginalPHI)
5381 continue;
5382
5383              // Otherwise it may have been moved to another PHI and need an
5384              // update. If the fixup operand is not found in any of the
5385              // incoming blocks, we have already rewritten it - nothing to do.
5386 for (const auto &Block : PN->blocks())
5387 for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I);
5388 ++I) {
5389 PHINode *NewPN = cast<PHINode>(I);
5390 for (const auto &val : NewPN->incoming_values())
5391 if (val == Fixup.OperandValToReplace)
5392 Fixup.UserInst = NewPN;
5393 }
5394 }
5395 }
5396 }
5397}
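
RewriteForPHI splits critical edges because code expanded for one incoming value must execute on exactly that edge, and a critical edge offers no block where it can run without affecting other paths. A minimal sketch of an edge split on a toy adjacency-list CFG (Block and splitEdge are illustrative stand-ins for LLVM's BasicBlock and SplitCriticalEdge):

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

struct Block {
  std::string Name;
  std::vector<Block *> Succs;
  explicit Block(std::string N) : Name(std::move(N)) {}
};

// Insert a fresh block on the From->To edge so code meant for that edge
// alone has somewhere to execute exactly once.
static Block *splitEdge(Block &From, Block &To,
                        std::vector<std::unique_ptr<Block>> &CFG) {
  CFG.push_back(std::unique_ptr<Block>(
      new Block(From.Name + "." + To.Name + ".split")));
  Block *NewBB = CFG.back().get();
  std::replace(From.Succs.begin(), From.Succs.end(), &To, NewBB);
  NewBB->Succs.push_back(&To);
  return NewBB; // A PHI in To must now name NewBB as its incoming block.
}

After the split, the PHI's incoming block for that value becomes the new block, which is why the loop above recomputes i, e, and BB before expanding.
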
5398
5399/// Emit instructions for the leading candidate expression for this LSRUse (this
5400/// is called "expanding"), and update the UserInst to reference the newly
5401/// expanded value.
5402void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
5403 const Formula &F, SCEVExpander &Rewriter,
5404 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5405 // First, find an insertion point that dominates UserInst. For PHI nodes,
5406 // find the nearest block which dominates all the relevant uses.
5407 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
5408 RewriteForPHI(PN, LU, LF, F, Rewriter, DeadInsts);
5409 } else {
5410 Value *FullV =
5411 Expand(LU, LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts);
5412
5413 // If this is reuse-by-noop-cast, insert the noop cast.
5414 Type *OpTy = LF.OperandValToReplace->getType();
5415 if (FullV->getType() != OpTy) {
5416 Instruction *Cast =
5417 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
5418 FullV, OpTy, "tmp", LF.UserInst);
5419 FullV = Cast;
5420 }
5421
5422 // Update the user. ICmpZero is handled specially here (for now) because
5423 // Expand may have updated one of the operands of the icmp already, and
5424 // its new value may happen to be equal to LF.OperandValToReplace, in
5425 // which case doing replaceUsesOfWith leads to replacing both operands
5426 // with the same value. TODO: Reorganize this.
5427 if (LU.Kind == LSRUse::ICmpZero)
5428 LF.UserInst->setOperand(0, FullV);
5429 else
5430 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
5431 }
5432
5433 DeadInsts.emplace_back(LF.OperandValToReplace);
5434}
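
The comment in Rewrite about ICmpZero hints at a general hazard: replaceUsesOfWith rewrites every matching operand, so if Expand has already made both icmp operands equal, a blind replacement would clobber both. A small self-contained demonstration of that hazard, using a toy instruction type (illustrative only):

#include <cassert>
#include <vector>

// Toy two-operand instruction: replaceUsesOfWith rewrites *every* operand
// that matches the old value, just like the LLVM User method.
struct Inst {
  std::vector<int *> Ops;
  void replaceUsesOfWith(int *Old, int *New) {
    for (int *&Op : Ops)
      if (Op == Old)
        Op = New;
  }
};

int main() {
  int A = 0, B = 0;
  Inst ICmp{{&A, &A}}; // Expand already made operand 1 equal to operand 0.
  ICmp.replaceUsesOfWith(&A, &B);
  assert(ICmp.Ops[0] == &B && ICmp.Ops[1] == &B); // Both rewritten - the hazard.

  ICmp.Ops = {&A, &A};
  ICmp.Ops[0] = &B; // setOperand(0, FullV): only operand 0 changes.
  assert(ICmp.Ops[0] == &B && ICmp.Ops[1] == &A);
  return 0;
}
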
5435
5436/// Rewrite all the fixup locations with new values, following the chosen
5437/// solution.
5438void LSRInstance::ImplementSolution(
5439 const SmallVectorImpl<const Formula *> &Solution) {
5440 // Keep track of instructions we may have made dead, so that
5441 // we can remove them after we are done working.
5442 SmallVector<WeakTrackingVH, 16> DeadInsts;
5443
5444 SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(),
5445 "lsr");
5446#ifndef NDEBUG
5447  Rewriter.setDebugType(DEBUG_TYPE);
5448#endif
5449 Rewriter.disableCanonicalMode();
5450 Rewriter.enableLSRMode();
5451 Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
5452
5453 // Mark phi nodes that terminate chains so the expander tries to reuse them.
5454 for (const IVChain &Chain : IVChainVec) {
21. Assuming '__begin1' is equal to '__end1'
5455 if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
5456 Rewriter.setChainedPhi(PN);
5457 }
5458
5459 // Expand the new value definitions and update the users.
5460 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
22. Assuming 'LUIdx' is equal to 'NumUses'
23. Loop condition is false. Execution continues on line 5466
5461 for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
5462 Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], Rewriter, DeadInsts);
5463 Changed = true;
5464 }
5465
5466 for (const IVChain &Chain : IVChainVec) {
24. Assuming '__begin1' is not equal to '__end1'
5467 GenerateIVChain(Chain, Rewriter, DeadInsts);
25. Calling 'LSRInstance::GenerateIVChain'
5468 Changed = true;
5469 }
5470 // Clean up after ourselves. This must be done before deleting any
5471 // instructions.
5472 Rewriter.clear();
5473
5474 Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
5475}
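
ImplementSolution defers deletion: possibly-dead instructions are collected through weak handles and removed only after the rewriter's state is cleared, so handles whose instruction has already vanished drop out harmlessly. A rough standalone analogue using std::weak_ptr in place of WeakTrackingVH (the semantics differ in detail; this is illustrative only):

#include <memory>
#include <vector>

struct Inst { int Opcode; };

int main() {
  std::vector<std::shared_ptr<Inst>> Program;
  Program.push_back(std::make_shared<Inst>(Inst{1}));
  Program.push_back(std::make_shared<Inst>(Inst{2}));

  // Record possibly-dead instructions without keeping them alive.
  std::vector<std::weak_ptr<Inst>> DeadInsts(Program.begin(), Program.end());

  Program.pop_back(); // Something else already erased the second instruction.

  // Final cleanup: handles whose instruction is gone simply drop out.
  for (auto &WH : DeadInsts)
    if (std::shared_ptr<Inst> I = WH.lock()) {
      (void)I; // Delete here if the instruction is trivially dead.
    }
  return 0;
}
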
5476
5477LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
5478 DominatorTree &DT, LoopInfo &LI,
5479 const TargetTransformInfo &TTI)
5480 : IU(IU), SE(SE), DT(DT), LI(LI), TTI(TTI), L(L),
5481 FavorBackedgeIndex(EnableBackedgeIndexing &&
3. Assuming the condition is false
5482 TTI.shouldFavorBackedgeIndex(L)) {
5483 // If LoopSimplify form is not available, stay out of trouble.
5484 if (!L->isLoopSimplifyForm())
4. Assuming the condition is false
5. Taking false branch
5485 return;
5486
5487 // If there's no interesting work to be done, bail early.
5488 if (IU.empty()) return;
6. Assuming the condition is false
7. Taking false branch
5489
5490 // If there's too much analysis to be done, bail early. We won't be able to
5491 // model the problem anyway.
5492 unsigned NumUsers = 0;
5493 for (const IVStrideUse &U : IU) {
5494 if (++NumUsers > MaxIVUsers) {
5495 (void)U;
5496      LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
5497                        << "\n");
5498 return;
5499 }
5500 // Bail out if we have a PHI on an EHPad that gets a value from a
5501 // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is
5502 // no good place to stick any instructions.
5503 if (auto *PN = dyn_cast<PHINode>(U.getUser())) {
5504 auto *FirstNonPHI = PN->getParent()->getFirstNonPHI();
5505 if (isa<FuncletPadInst>(FirstNonPHI) ||
5506 isa<CatchSwitchInst>(FirstNonPHI))
5507 for (BasicBlock *PredBB : PN->blocks())
5508 if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI()))
5509 return;
5510 }
5511 }
5512
5513#ifndef NDEBUG
5514 // All dominating loops must have preheaders, or SCEVExpander may not be able
5515 // to materialize an AddRecExpr whose Start is an outer AddRecExpr.
5516 //
5517 // IVUsers analysis should only create users that are dominated by simple loop
5518 // headers. Since this loop should dominate all of its users, its user list
5519 // should be empty if this loop itself is not within a simple loop nest.
5520 for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
8. Loop condition is false. Execution continues on line 5530
5521 Rung; Rung = Rung->getIDom()) {
5522 BasicBlock *BB = Rung->getBlock();
5523 const Loop *DomLoop = LI.getLoopFor(BB);
5524 if (DomLoop && DomLoop->getHeader() == BB) {
5525      assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
5526 }
5527 }
5528#endif // NDEBUG
5529
5530  LLVM_DEBUG(dbgs() << "\nLSR on loop ";
9. Assuming 'DebugFlag' is 0
10. Loop condition is false. Exiting loop
5531             L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
5532             dbgs() << ":\n");
5533
5534 // First, perform some low-level loop optimizations.
5535 OptimizeShadowIV();
5536 OptimizeLoopTermCond();
5537
5538 // If loop preparation eliminates all interesting IV users, bail.
5539 if (IU.empty()) return;
11. Assuming the condition is false
12. Taking false branch
5540
5541 // Skip nested loops until we can model them better with formulae.
5542 if (!L->empty()) {
13. Assuming the condition is false
14. Taking false branch
5543    LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
5544 return;
5545 }
5546
5547 // Start collecting data and preparing for the solver.
5548 CollectChains();
5549 CollectInterestingTypesAndFactors();
5550 CollectFixupsAndInitialFormulae();
5551 CollectLoopInvariantFixupsAndFormulae();
5552
5553 if (Uses.empty())
15. Taking false branch
5554 return;
5555
5556  LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
16. Assuming 'DebugFlag' is 0
17. Loop condition is false. Exiting loop
5557             print_uses(dbgs()));
5558
5559 // Now use the reuse data to generate a bunch of interesting ways
5560 // to formulate the values needed for the uses.
5561 GenerateAllReuseFormulae();
5562
5563 FilterOutUndesirableDedicatedRegisters();
5564 NarrowSearchSpaceUsingHeuristics();
5565
5566 SmallVector<const Formula *, 8> Solution;
5567 Solve(Solution);
5568
5569 // Release memory that is no longer needed.
5570 Factors.clear();
5571 Types.clear();
5572 RegUses.clear();
5573
5574 if (Solution.empty())
18. Taking false branch
5575 return;
5576
5577#ifndef NDEBUG
5578 // Formulae should be legal.
5579 for (const LSRUse &LU : Uses) {
19. Assuming '__begin1' is equal to '__end1'
5580 for (const Formula &F : LU.Formulae)
5581      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
5582                        F) && "Illegal formula generated!");
5583 };
5584#endif
5585
5586 // Now that we've decided what we want, make it so.
5587 ImplementSolution(Solution);
20. Calling 'LSRInstance::ImplementSolution'
5588}
5589
5590#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
5591void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
5592 if (Factors.empty() && Types.empty()) return;
5593
5594 OS << "LSR has identified the following interesting factors and types: ";
5595 bool First = true;
5596
5597 for (int64_t Factor : Factors) {
5598 if (!First) OS << ", ";
5599 First = false;
5600 OS << '*' << Factor;
5601 }
5602
5603 for (Type *Ty : Types) {
5604 if (!First) OS << ", ";
5605 First = false;
5606 OS << '(' << *Ty << ')';
5607 }
5608 OS << '\n';
5609}
5610
5611void LSRInstance::print_fixups(raw_ostream &OS) const {
5612 OS << "LSR is examining the following fixup sites:\n";
5613 for (const LSRUse &LU : Uses)
5614 for (const LSRFixup &LF : LU.Fixups) {
5615 dbgs() << " ";
5616 LF.print(OS);
5617 OS << '\n';
5618 }
5619}
5620
5621void LSRInstance::print_uses(raw_ostream &OS) const {
5622 OS << "LSR is examining the following uses:\n";
5623 for (const LSRUse &LU : Uses) {
5624 dbgs() << " ";
5625 LU.print(OS);
5626 OS << '\n';
5627 for (const Formula &F : LU.Formulae) {
5628 OS << " ";
5629 F.print(OS);
5630 OS << '\n';
5631 }
5632 }
5633}
5634
5635void LSRInstance::print(raw_ostream &OS) const {
5636 print_factors_and_types(OS);
5637 print_fixups(OS);
5638 print_uses(OS);
5639}
5640
5641LLVM_DUMP_METHOD void LSRInstance::dump() const {
5642 print(errs()); errs() << '\n';
5643}
5644#endif
5645
5646namespace {
5647
5648class LoopStrengthReduce : public LoopPass {
5649public:
5650 static char ID; // Pass ID, replacement for typeid
5651
5652 LoopStrengthReduce();
5653
5654private:
5655 bool runOnLoop(Loop *L, LPPassManager &LPM) override;
5656 void getAnalysisUsage(AnalysisUsage &AU) const override;
5657};
5658
5659} // end anonymous namespace
5660
5661LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
5662 initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
5663}
5664
5665void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
5666 // We split critical edges, so we change the CFG. However, we do update
5667 // many analyses if they are around.
5668 AU.addPreservedID(LoopSimplifyID);
5669
5670 AU.addRequired<LoopInfoWrapperPass>();
5671 AU.addPreserved<LoopInfoWrapperPass>();
5672 AU.addRequiredID(LoopSimplifyID);
5673 AU.addRequired<DominatorTreeWrapperPass>();
5674 AU.addPreserved<DominatorTreeWrapperPass>();
5675 AU.addRequired<ScalarEvolutionWrapperPass>();
5676 AU.addPreserved<ScalarEvolutionWrapperPass>();
5677 // Requiring LoopSimplify a second time here prevents IVUsers from running
5678 // twice, since LoopSimplify was invalidated by running ScalarEvolution.
5679 AU.addRequiredID(LoopSimplifyID);
5680 AU.addRequired<IVUsersWrapperPass>();
5681 AU.addPreserved<IVUsersWrapperPass>();
5682 AU.addRequired<TargetTransformInfoWrapperPass>();
5683}
5684
5685static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
5686 DominatorTree &DT, LoopInfo &LI,
5687 const TargetTransformInfo &TTI) {
5688 bool Changed = false;
5689
5690 // Run the main LSR transformation.
5691 Changed |= LSRInstance(L, IU, SE, DT, LI, TTI).getChanged();
2. Calling constructor for 'LSRInstance'
5692
5693 // Remove any extra phis created by processing inner loops.
5694 Changed |= DeleteDeadPHIs(L->getHeader());
5695 if (EnablePhiElim && L->isLoopSimplifyForm()) {
5696 SmallVector<WeakTrackingVH, 16> DeadInsts;
5697 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
5698 SCEVExpander Rewriter(SE, DL, "lsr");
5699#ifndef NDEBUG