LLVM 23.0.0git
LoopUnrollPass.cpp
Go to the documentation of this file.
1//===- LoopUnroll.cpp - Loop unroller pass --------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements a simple loop unroller. It works best when loops have
10// been canonicalized by the -indvars pass, allowing it to determine the trip
11// counts of loops easily.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/DenseSet.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/StringRef.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/CFG.h"
38#include "llvm/IR/Constant.h"
39#include "llvm/IR/Constants.h"
41#include "llvm/IR/Dominators.h"
42#include "llvm/IR/Function.h"
43#include "llvm/IR/Instruction.h"
45#include "llvm/IR/Metadata.h"
46#include "llvm/IR/PassManager.h"
48#include "llvm/Pass.h"
51#include "llvm/Support/Debug.h"
63#include <algorithm>
64#include <cassert>
65#include <cstdint>
66#include <limits>
67#include <optional>
68#include <string>
69#include <tuple>
70#include <utility>
71
72using namespace llvm;
73
74#define DEBUG_TYPE "loop-unroll"
75
77 "forget-scev-loop-unroll", cl::init(false), cl::Hidden,
78 cl::desc("Forget everything in SCEV when doing LoopUnroll, instead of just"
79 " the current top-most loop. This is sometimes preferred to reduce"
80 " compile time."));
81
83 UnrollThreshold("unroll-threshold", cl::Hidden,
84 cl::desc("The cost threshold for loop unrolling"));
85
88 "unroll-optsize-threshold", cl::init(0), cl::Hidden,
89 cl::desc("The cost threshold for loop unrolling when optimizing for "
90 "size"));
91
93 "unroll-partial-threshold", cl::Hidden,
94 cl::desc("The cost threshold for partial loop unrolling"));
95
97 "unroll-max-percent-threshold-boost", cl::init(400), cl::Hidden,
98 cl::desc("The maximum 'boost' (represented as a percentage >= 100) applied "
99 "to the threshold when aggressively unrolling a loop due to the "
100 "dynamic cost savings. If completely unrolling a loop will reduce "
101 "the total runtime from X to Y, we boost the loop unroll "
102 "threshold to DefaultThreshold*std::min(MaxPercentThresholdBoost, "
103 "X/Y). This limit avoids excessive code bloat."));
104
106 "unroll-max-iteration-count-to-analyze", cl::init(10), cl::Hidden,
107 cl::desc("Don't allow loop unrolling to simulate more than this number of "
108 "iterations when checking full unroll profitability"));
109
111 "unroll-count", cl::Hidden,
112 cl::desc("Use this unroll count for all loops including those with "
113 "unroll_count pragma values, for testing purposes"));
114
116 "unroll-max-count", cl::Hidden,
117 cl::desc("Set the max unroll count for partial and runtime unrolling, for"
118 "testing purposes"));
119
121 "unroll-full-max-count", cl::Hidden,
122 cl::desc(
123 "Set the max unroll count for full unrolling, for testing purposes"));
124
125static cl::opt<bool>
126 UnrollAllowPartial("unroll-allow-partial", cl::Hidden,
127 cl::desc("Allows loops to be partially unrolled until "
128 "-unroll-threshold loop size is reached."));
129
131 "unroll-allow-remainder", cl::Hidden,
132 cl::desc("Allow generation of a loop remainder (extra iterations) "
133 "when unrolling a loop."));
134
135static cl::opt<bool>
136 UnrollRuntime("unroll-runtime", cl::Hidden,
137 cl::desc("Unroll loops with run-time trip counts"));
138
140 "unroll-max-upperbound", cl::init(8), cl::Hidden,
141 cl::desc(
142 "The max of trip count upper bound that is considered in unrolling"));
143
145 "pragma-unroll-threshold", cl::init(16 * 1024), cl::Hidden,
146 cl::desc("Unrolled size limit for loops with unroll metadata "
147 "(full, enable, or count)."));
148
150 "flat-loop-tripcount-threshold", cl::init(5), cl::Hidden,
151 cl::desc("If the runtime tripcount for the loop is lower than the "
152 "threshold, the loop is considered as flat and will be less "
153 "aggressively unrolled."));
154
156 "unroll-remainder", cl::Hidden,
157 cl::desc("Allow the loop remainder to be unrolled."));
158
159// This option isn't ever intended to be enabled, it serves to allow
160// experiments to check the assumptions about when this kind of revisit is
161// necessary.
163 "unroll-revisit-child-loops", cl::Hidden,
164 cl::desc("Enqueue and re-visit child loops in the loop PM after unrolling. "
165 "This shouldn't typically be needed as child loops (or their "
166 "clones) were already visited."));
167
169 "unroll-threshold-aggressive", cl::init(300), cl::Hidden,
170 cl::desc("Threshold (max size of unrolled loop) to use in aggressive (O3) "
171 "optimizations"));
173 UnrollThresholdDefault("unroll-threshold-default", cl::init(150),
175 cl::desc("Default threshold (max size of unrolled "
176 "loop), used in all but O3 optimizations"));
177
179 "pragma-unroll-full-max-iterations", cl::init(1'000'000), cl::Hidden,
180 cl::desc("Maximum allowed iterations to unroll under pragma unroll full."));
181
182/// A magic value for use with the Threshold parameter to indicate
183/// that the loop unroll should be performed regardless of how much
184/// code expansion would result.
185static const unsigned NoThreshold = std::numeric_limits<unsigned>::max();
186
187/// Gather the various unrolling parameters based on the defaults, compiler
188/// flags, TTI overrides and user specified parameters.
192 OptimizationRemarkEmitter &ORE, int OptLevel,
193 std::optional<unsigned> UserThreshold, std::optional<unsigned> UserCount,
194 std::optional<bool> UserAllowPartial, std::optional<bool> UserRuntime,
195 std::optional<bool> UserUpperBound,
196 std::optional<unsigned> UserFullUnrollMaxCount) {
198
199 // Set up the defaults
200 UP.Threshold =
204 UP.PartialThreshold = 150;
206 UP.Count = 0;
208 UP.MaxCount = std::numeric_limits<unsigned>::max();
210 UP.FullUnrollMaxCount = std::numeric_limits<unsigned>::max();
211 UP.BEInsns = 2;
212 UP.Partial = false;
213 UP.Runtime = false;
214 UP.AllowRemainder = true;
215 UP.UnrollRemainder = false;
216 UP.AllowExpensiveTripCount = false;
217 UP.Force = false;
218 UP.UpperBound = false;
219 UP.UnrollAndJam = false;
223 UP.RuntimeUnrollMultiExit = false;
224 UP.AddAdditionalAccumulators = false;
225
226 // Override with any target specific settings
227 TTI.getUnrollingPreferences(L, SE, UP, &ORE);
228
229 // Apply size attributes
230 bool OptForSize = L->getHeader()->getParent()->hasOptSize() ||
231 // Let unroll hints / pragmas take precedence over PGSO.
233 llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
235 if (OptForSize) {
239 }
240
241 // Apply any user values specified by cl::opt
242 if (UnrollThreshold.getNumOccurrences() > 0)
244 if (UnrollPartialThreshold.getNumOccurrences() > 0)
246 if (UnrollMaxPercentThresholdBoost.getNumOccurrences() > 0)
248 if (UnrollMaxCount.getNumOccurrences() > 0)
250 if (UnrollMaxUpperBound.getNumOccurrences() > 0)
252 if (UnrollFullMaxCount.getNumOccurrences() > 0)
254 if (UnrollAllowPartial.getNumOccurrences() > 0)
256 if (UnrollAllowRemainder.getNumOccurrences() > 0)
258 if (UnrollRuntime.getNumOccurrences() > 0)
260 if (UnrollMaxUpperBound == 0)
261 UP.UpperBound = false;
262 if (UnrollUnrollRemainder.getNumOccurrences() > 0)
264 if (UnrollMaxIterationsCountToAnalyze.getNumOccurrences() > 0)
266
267 // Apply user values provided by argument
268 if (UserThreshold) {
269 UP.Threshold = *UserThreshold;
270 UP.PartialThreshold = *UserThreshold;
271 }
272 if (UserCount)
273 UP.Count = *UserCount;
274 if (UserAllowPartial)
275 UP.Partial = *UserAllowPartial;
276 if (UserRuntime)
277 UP.Runtime = *UserRuntime;
278 if (UserUpperBound)
279 UP.UpperBound = *UserUpperBound;
280 if (UserFullUnrollMaxCount)
281 UP.FullUnrollMaxCount = *UserFullUnrollMaxCount;
282
283 return UP;
284}
285
286namespace {
287
288/// A struct to densely store the state of an instruction after unrolling at
289/// each iteration.
290///
291/// This is designed to work like a tuple of <Instruction *, int> for the
292/// purposes of hashing and lookup, but to be able to associate two boolean
293/// states with each key.
294struct UnrolledInstState {
295 Instruction *I;
296 int Iteration : 30;
297 unsigned IsFree : 1;
298 unsigned IsCounted : 1;
299};
300
301/// Hashing and equality testing for a set of the instruction states.
302struct UnrolledInstStateKeyInfo {
303 using PtrInfo = DenseMapInfo<Instruction *>;
304 using PairInfo = DenseMapInfo<std::pair<Instruction *, int>>;
305
306 static inline UnrolledInstState getEmptyKey() {
307 return {PtrInfo::getEmptyKey(), 0, 0, 0};
308 }
309
310 static inline UnrolledInstState getTombstoneKey() {
311 return {PtrInfo::getTombstoneKey(), 0, 0, 0};
312 }
313
314 static inline unsigned getHashValue(const UnrolledInstState &S) {
315 return PairInfo::getHashValue({S.I, S.Iteration});
316 }
317
318 static inline bool isEqual(const UnrolledInstState &LHS,
319 const UnrolledInstState &RHS) {
320 return PairInfo::isEqual({LHS.I, LHS.Iteration}, {RHS.I, RHS.Iteration});
321 }
322};
323
324struct EstimatedUnrollCost {
325 /// The estimated cost after unrolling.
326 unsigned UnrolledCost;
327
328 /// The estimated dynamic cost of executing the instructions in the
329 /// rolled form.
330 unsigned RolledDynamicCost;
331};
332
333} // end anonymous namespace
334
335/// Figure out if the loop is worth full unrolling.
336///
337/// Complete loop unrolling can make some loads constant, and we need to know
338/// if that would expose any further optimization opportunities. This routine
339/// estimates this optimization. It computes cost of unrolled loop
340/// (UnrolledCost) and dynamic cost of the original loop (RolledDynamicCost). By
341/// dynamic cost we mean that we won't count costs of blocks that are known not
342/// to be executed (i.e. if we have a branch in the loop and we know that at the
343/// given iteration its condition would be resolved to true, we won't add up the
344/// cost of the 'false'-block).
345/// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If
346/// the analysis failed (no benefits expected from the unrolling, or the loop is
347/// too big to analyze), the returned value is std::nullopt.
348static std::optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
349 const Loop *L, unsigned TripCount, DominatorTree &DT, ScalarEvolution &SE,
350 const SmallPtrSetImpl<const Value *> &EphValues,
351 const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize,
352 unsigned MaxIterationsCountToAnalyze) {
353 // We want to be able to scale offsets by the trip count and add more offsets
354 // to them without checking for overflows, and we already don't want to
355 // analyze *massive* trip counts, so we force the max to be reasonably small.
356 assert(MaxIterationsCountToAnalyze <
357 (unsigned)(std::numeric_limits<int>::max() / 2) &&
358 "The unroll iterations max is too large!");
359
360 // Only analyze inner loops. We can't properly estimate cost of nested loops
361 // and we won't visit inner loops again anyway.
362 if (!L->isInnermost()) {
364 << "Not analyzing loop cost: not an innermost loop.\n");
365 return std::nullopt;
366 }
367
368 // Don't simulate loops with a big or unknown tripcount
369 if (!TripCount || TripCount > MaxIterationsCountToAnalyze) {
371 << "Not analyzing loop cost: trip count "
372 << (TripCount ? "too large" : "unknown") << ".\n");
373 return std::nullopt;
374 }
375
378 DenseMap<Value *, Value *> SimplifiedValues;
379 SmallVector<std::pair<Value *, Value *>, 4> SimplifiedInputValues;
380
381 // The estimated cost of the unrolled form of the loop. We try to estimate
382 // this by simplifying as much as we can while computing the estimate.
383 InstructionCost UnrolledCost = 0;
384
385 // We also track the estimated dynamic (that is, actually executed) cost in
386 // the rolled form. This helps identify cases when the savings from unrolling
387 // aren't just exposing dead control flows, but actual reduced dynamic
388 // instructions due to the simplifications which we expect to occur after
389 // unrolling.
390 InstructionCost RolledDynamicCost = 0;
391
392 // We track the simplification of each instruction in each iteration. We use
393 // this to recursively merge costs into the unrolled cost on-demand so that
394 // we don't count the cost of any dead code. This is essentially a map from
395 // <instruction, int> to <bool, bool>, but stored as a densely packed struct.
397
398 // A small worklist used to accumulate cost of instructions from each
399 // observable and reached root in the loop.
401
402 // PHI-used worklist used between iterations while accumulating cost.
404
405 // Helper function to accumulate cost for instructions in the loop.
406 auto AddCostRecursively = [&](Instruction &RootI, int Iteration) {
407 assert(Iteration >= 0 && "Cannot have a negative iteration!");
408 assert(CostWorklist.empty() && "Must start with an empty cost list");
409 assert(PHIUsedList.empty() && "Must start with an empty phi used list");
410 CostWorklist.push_back(&RootI);
412 RootI.getFunction()->hasMinSize() ?
415 for (;; --Iteration) {
416 do {
417 Instruction *I = CostWorklist.pop_back_val();
418
419 // InstCostMap only uses I and Iteration as a key, the other two values
420 // don't matter here.
421 auto CostIter = InstCostMap.find({I, Iteration, 0, 0});
422 if (CostIter == InstCostMap.end())
423 // If an input to a PHI node comes from a dead path through the loop
424 // we may have no cost data for it here. What that actually means is
425 // that it is free.
426 continue;
427 auto &Cost = *CostIter;
428 if (Cost.IsCounted)
429 // Already counted this instruction.
430 continue;
431
432 // Mark that we are counting the cost of this instruction now.
433 Cost.IsCounted = true;
434
435 // If this is a PHI node in the loop header, just add it to the PHI set.
436 if (auto *PhiI = dyn_cast<PHINode>(I))
437 if (PhiI->getParent() == L->getHeader()) {
438 assert(Cost.IsFree && "Loop PHIs shouldn't be evaluated as they "
439 "inherently simplify during unrolling.");
440 if (Iteration == 0)
441 continue;
442
443 // Push the incoming value from the backedge into the PHI used list
444 // if it is an in-loop instruction. We'll use this to populate the
445 // cost worklist for the next iteration (as we count backwards).
446 if (auto *OpI = dyn_cast<Instruction>(
447 PhiI->getIncomingValueForBlock(L->getLoopLatch())))
448 if (L->contains(OpI))
449 PHIUsedList.push_back(OpI);
450 continue;
451 }
452
453 // First accumulate the cost of this instruction.
454 if (!Cost.IsFree) {
455 // Consider simplified operands in instruction cost.
457 transform(I->operands(), std::back_inserter(Operands),
458 [&](Value *Op) {
459 if (auto Res = SimplifiedValues.lookup(Op))
460 return Res;
461 return Op;
462 });
463 UnrolledCost += TTI.getInstructionCost(I, Operands, CostKind);
465 << "Adding cost of instruction (iteration " << Iteration
466 << "): ");
467 LLVM_DEBUG(I->dump());
468 }
469
470 // We must count the cost of every operand which is not free,
471 // recursively. If we reach a loop PHI node, simply add it to the set
472 // to be considered on the next iteration (backwards!).
473 for (Value *Op : I->operands()) {
474 // Check whether this operand is free due to being a constant or
475 // outside the loop.
476 auto *OpI = dyn_cast<Instruction>(Op);
477 if (!OpI || !L->contains(OpI))
478 continue;
479
480 // Otherwise accumulate its cost.
481 CostWorklist.push_back(OpI);
482 }
483 } while (!CostWorklist.empty());
484
485 if (PHIUsedList.empty())
486 // We've exhausted the search.
487 break;
488
489 assert(Iteration > 0 &&
490 "Cannot track PHI-used values past the first iteration!");
491 CostWorklist.append(PHIUsedList.begin(), PHIUsedList.end());
492 PHIUsedList.clear();
493 }
494 };
495
496 // Ensure that we don't violate the loop structure invariants relied on by
497 // this analysis.
498 assert(L->isLoopSimplifyForm() && "Must put loop into normal form first.");
499 assert(L->isLCSSAForm(DT) &&
500 "Must have loops in LCSSA form to track live-out values.");
501
503 << "Starting LoopUnroll profitability analysis...\n");
504
506 L->getHeader()->getParent()->hasMinSize() ?
508 // Simulate execution of each iteration of the loop counting instructions,
509 // which would be simplified.
510 // Since the same load will take different values on different iterations,
511 // we literally have to go through all loop's iterations.
512 for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) {
513 LLVM_DEBUG(dbgs().indent(3) << "Analyzing iteration " << Iteration << "\n");
514
515 // Prepare for the iteration by collecting any simplified entry or backedge
516 // inputs.
517 for (Instruction &I : *L->getHeader()) {
518 auto *PHI = dyn_cast<PHINode>(&I);
519 if (!PHI)
520 break;
521
522 // The loop header PHI nodes must have exactly two input: one from the
523 // loop preheader and one from the loop latch.
524 assert(
525 PHI->getNumIncomingValues() == 2 &&
526 "Must have an incoming value only for the preheader and the latch.");
527
528 Value *V = PHI->getIncomingValueForBlock(
529 Iteration == 0 ? L->getLoopPreheader() : L->getLoopLatch());
530 if (Iteration != 0 && SimplifiedValues.count(V))
531 V = SimplifiedValues.lookup(V);
532 SimplifiedInputValues.push_back({PHI, V});
533 }
534
535 // Now clear and re-populate the map for the next iteration.
536 SimplifiedValues.clear();
537 while (!SimplifiedInputValues.empty())
538 SimplifiedValues.insert(SimplifiedInputValues.pop_back_val());
539
540 UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, SE, L);
541
542 BBWorklist.clear();
543 BBWorklist.insert(L->getHeader());
544 // Note that we *must not* cache the size, this loop grows the worklist.
545 for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
546 BasicBlock *BB = BBWorklist[Idx];
547
548 // Visit all instructions in the given basic block and try to simplify
549 // it. We don't change the actual IR, just count optimization
550 // opportunities.
551 for (Instruction &I : *BB) {
552 // These won't get into the final code - don't even try calculating the
553 // cost for them.
554 if (EphValues.count(&I))
555 continue;
556
557 // Track this instruction's expected baseline cost when executing the
558 // rolled loop form.
559 RolledDynamicCost += TTI.getInstructionCost(&I, CostKind);
560
561 // Visit the instruction to analyze its loop cost after unrolling,
562 // and if the visitor returns true, mark the instruction as free after
563 // unrolling and continue.
564 bool IsFree = Analyzer.visit(I);
565 bool Inserted = InstCostMap.insert({&I, (int)Iteration,
566 (unsigned)IsFree,
567 /*IsCounted*/ false}).second;
568 (void)Inserted;
569 assert(Inserted && "Cannot have a state for an unvisited instruction!");
570
571 if (IsFree)
572 continue;
573
574 // Can't properly model a cost of a call.
575 // FIXME: With a proper cost model we should be able to do it.
576 if (auto *CI = dyn_cast<CallInst>(&I)) {
577 const Function *Callee = CI->getCalledFunction();
578 if (!Callee || TTI.isLoweredToCall(Callee)) {
580 << "Can't analyze cost of loop with call\n");
581 return std::nullopt;
582 }
583 }
584
585 // If the instruction might have a side-effect recursively account for
586 // the cost of it and all the instructions leading up to it.
587 if (I.mayHaveSideEffects())
588 AddCostRecursively(I, Iteration);
589
590 // If unrolled body turns out to be too big, bail out.
591 if (UnrolledCost > MaxUnrolledLoopSize) {
592 LLVM_DEBUG({
593 dbgs().indent(3) << "Exceeded threshold.. exiting.\n";
594 dbgs().indent(3)
595 << "UnrolledCost: " << UnrolledCost
596 << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize << "\n";
597 });
598 return std::nullopt;
599 }
600 }
601
602 Instruction *TI = BB->getTerminator();
603
604 auto getSimplifiedConstant = [&](Value *V) -> Constant * {
605 if (SimplifiedValues.count(V))
606 V = SimplifiedValues.lookup(V);
607 return dyn_cast<Constant>(V);
608 };
609
610 // Add in the live successors by first checking whether we have terminator
611 // that may be simplified based on the values simplified by this call.
612 BasicBlock *KnownSucc = nullptr;
613 if (CondBrInst *BI = dyn_cast<CondBrInst>(TI)) {
614 if (auto *SimpleCond = getSimplifiedConstant(BI->getCondition())) {
615 // Just take the first successor if condition is undef
616 if (isa<UndefValue>(SimpleCond))
617 KnownSucc = BI->getSuccessor(0);
618 else if (ConstantInt *SimpleCondVal =
619 dyn_cast<ConstantInt>(SimpleCond))
620 KnownSucc = BI->getSuccessor(SimpleCondVal->isZero() ? 1 : 0);
621 }
622 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
623 if (auto *SimpleCond = getSimplifiedConstant(SI->getCondition())) {
624 // Just take the first successor if condition is undef
625 if (isa<UndefValue>(SimpleCond))
626 KnownSucc = SI->getSuccessor(0);
627 else if (ConstantInt *SimpleCondVal =
628 dyn_cast<ConstantInt>(SimpleCond))
629 KnownSucc = SI->findCaseValue(SimpleCondVal)->getCaseSuccessor();
630 }
631 }
632 if (KnownSucc) {
633 if (L->contains(KnownSucc))
634 BBWorklist.insert(KnownSucc);
635 else
636 ExitWorklist.insert({BB, KnownSucc});
637 continue;
638 }
639
640 // Add BB's successors to the worklist.
641 for (BasicBlock *Succ : successors(BB))
642 if (L->contains(Succ))
643 BBWorklist.insert(Succ);
644 else
645 ExitWorklist.insert({BB, Succ});
646 AddCostRecursively(*TI, Iteration);
647 }
648
649 // If we found no optimization opportunities on the first iteration, we
650 // won't find them on later ones too.
651 if (UnrolledCost == RolledDynamicCost) {
652 LLVM_DEBUG({
653 dbgs().indent(3) << "No opportunities found.. exiting.\n";
654 dbgs().indent(3) << "UnrolledCost: " << UnrolledCost << "\n";
655 });
656 return std::nullopt;
657 }
658 }
659
660 while (!ExitWorklist.empty()) {
661 BasicBlock *ExitingBB, *ExitBB;
662 std::tie(ExitingBB, ExitBB) = ExitWorklist.pop_back_val();
663
664 for (Instruction &I : *ExitBB) {
665 auto *PN = dyn_cast<PHINode>(&I);
666 if (!PN)
667 break;
668
669 Value *Op = PN->getIncomingValueForBlock(ExitingBB);
670 if (auto *OpI = dyn_cast<Instruction>(Op))
671 if (L->contains(OpI))
672 AddCostRecursively(*OpI, TripCount - 1);
673 }
674 }
675
676 assert(UnrolledCost.isValid() && RolledDynamicCost.isValid() &&
677 "All instructions must have a valid cost, whether the "
678 "loop is rolled or unrolled.");
679
680 LLVM_DEBUG({
681 dbgs().indent(3) << "Analysis finished:\n";
682 dbgs().indent(3) << "UnrolledCost: " << UnrolledCost
683 << ", RolledDynamicCost: " << RolledDynamicCost << "\n";
684 });
685 return {{unsigned(UnrolledCost.getValue()),
686 unsigned(RolledDynamicCost.getValue())}};
687}
688
690 const Loop *L, const TargetTransformInfo &TTI,
691 const SmallPtrSetImpl<const Value *> &EphValues, unsigned BEInsns,
692 bool TripCountIsUniform) {
694 for (BasicBlock *BB : L->blocks())
695 Metrics.analyzeBasicBlock(BB, TTI, EphValues, /* PrepareForLTO= */ false,
696 L);
697 NumInlineCandidates = Metrics.NumInlineCandidates;
698 NotDuplicatable = Metrics.notDuplicatable;
699 Convergence = Metrics.Convergence;
700 LoopSize = Metrics.NumInsts;
701 // Convergent operations make the remainder prelude unsafe by adding a
702 // control-flow dependency, unless the trip count is uniform per
703 // UniformityInfo, in which case all paths agree and the remainder is safe.
705 (Metrics.Convergence != ConvergenceKind::Uncontrolled &&
707 TripCountIsUniform;
708
709 // Don't allow an estimate of size zero. This would allows unrolling of loops
710 // with huge iteration counts, which is a compile time problem even if it's
711 // not a problem for code quality. Also, the code using this size may assume
712 // that each loop has at least three instructions (likely a conditional
713 // branch, a comparison feeding that branch, and some kind of loop increment
714 // feeding that comparison instruction).
715 if (LoopSize.isValid() && LoopSize < BEInsns + 1)
716 // This is an open coded max() on InstructionCost
717 LoopSize = BEInsns + 1;
718}
719
721 const Loop *L) const {
722 auto ReportCannotUnroll = [&](StringRef Reason) {
723 LLVM_DEBUG(dbgs().indent(1) << "Not unrolling: " << Reason << ".\n");
724 if (ORE && L)
725 ORE->emit([&]() {
726 return OptimizationRemarkMissed(DEBUG_TYPE, "CannotUnrollLoop",
727 L->getStartLoc(), L->getHeader())
728 << "unable to unroll loop: " << Reason;
729 });
730 };
731
733 ReportCannotUnroll("contains convergent operations");
734 return false;
735 }
736 if (!LoopSize.isValid()) {
737 ReportCannotUnroll("loop size could not be computed");
738 return false;
739 }
740 if (NotDuplicatable) {
741 ReportCannotUnroll("contains non-duplicatable instructions");
742 return false;
743 }
744 return true;
745}
746
749 unsigned CountOverwrite) const {
750 unsigned LS = LoopSize.getValue();
751 assert(LS >= UP.BEInsns && "LoopSize should not be less than BEInsns!");
752 if (CountOverwrite)
753 return static_cast<uint64_t>(LS - UP.BEInsns) * CountOverwrite + UP.BEInsns;
754 else
755 return static_cast<uint64_t>(LS - UP.BEInsns) * UP.Count + UP.BEInsns;
756}
757
758// Returns true if the loop has an unroll(full) pragma.
759static bool hasUnrollFullPragma(const Loop *L) {
760 return getUnrollMetadataForLoop(L, "llvm.loop.unroll.full");
761}
762
763// Returns true if the loop has an unroll(enable) pragma. This metadata is used
764// for both "#pragma unroll" and "#pragma clang loop unroll(enable)" directives.
765static bool hasUnrollEnablePragma(const Loop *L) {
766 return getUnrollMetadataForLoop(L, "llvm.loop.unroll.enable");
767}
768
769// Returns true if the loop has a runtime unroll(disable) pragma.
770static bool hasRuntimeUnrollDisablePragma(const Loop *L) {
771 return getUnrollMetadataForLoop(L, "llvm.loop.unroll.runtime.disable");
772}
773
774/// Returns true if the SCEV expression is uniform, i.e., all threads in a
775/// convergent execution agree on its value. Recursively checks operands.
776/// Returns false if the SCEV could not be computed.
777static bool isSCEVUniform(const SCEV *S, UniformityInfo &UI) {
779 return false;
780 if (isa<SCEVConstant>(S))
781 return true;
782 if (auto *U = dyn_cast<SCEVUnknown>(S))
783 return UI.isUniform(U->getValue());
784 for (const SCEV *Op : S->operands()) {
785 if (!isSCEVUniform(Op, UI))
786 return false;
787 }
788 return true;
789}
790
791// If loop has an unroll_count pragma return the (necessarily
792// positive) value from the pragma. Otherwise return 0.
793static unsigned unrollCountPragmaValue(const Loop *L) {
794 MDNode *MD = getUnrollMetadataForLoop(L, "llvm.loop.unroll.count");
795 if (MD) {
796 assert(MD->getNumOperands() == 2 &&
797 "Unroll count hint metadata should have two operands.");
798 unsigned Count =
799 mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue();
800 assert(Count >= 1 && "Unroll count must be positive.");
801 return Count;
802 }
803 return 0;
804}
805
814
815// Computes the boosting factor for complete unrolling.
816// If fully unrolling the loop would save a lot of RolledDynamicCost, it would
817// be beneficial to fully unroll the loop even if unrolledcost is large. We
818// use (RolledDynamicCost / UnrolledCost) to model the unroll benefits to adjust
819// the unroll threshold.
820static unsigned getFullUnrollBoostingFactor(const EstimatedUnrollCost &Cost,
821 unsigned MaxPercentThresholdBoost) {
822 if (Cost.RolledDynamicCost >= std::numeric_limits<unsigned>::max() / 100)
823 return 100;
824 else if (Cost.UnrolledCost != 0)
825 // The boosting factor is RolledDynamicCost / UnrolledCost
826 return std::min(100 * Cost.RolledDynamicCost / Cost.UnrolledCost,
827 MaxPercentThresholdBoost);
828 else
829 return MaxPercentThresholdBoost;
830}
831
832static std::optional<unsigned>
834 const unsigned TripMultiple, const unsigned TripCount,
835 unsigned MaxTripCount, const UnrollCostEstimator UCE,
838
839 // Using unroll pragma
840 // 1st priority is unroll count set by "unroll-count" option.
841
842 if (PInfo.UserUnrollCount) {
843 if (UP.AllowRemainder &&
844 UCE.getUnrolledLoopSize(UP, (unsigned)UnrollCount) < UP.Threshold) {
845 LLVM_DEBUG(dbgs().indent(2) << "Unrolling with user-specified count: "
846 << UnrollCount << ".\n");
847 return (unsigned)UnrollCount;
848 }
850 << "Not unrolling with user count " << UnrollCount << ": "
851 << (UP.AllowRemainder ? "exceeds threshold"
852 : "remainder not allowed")
853 << ".\n");
854 }
855
856 // 2nd priority is unroll count set by pragma.
857 if (PInfo.PragmaCount > 0) {
858 if ((UP.AllowRemainder || (TripMultiple % PInfo.PragmaCount == 0))) {
859 LLVM_DEBUG(dbgs().indent(2) << "Unrolling with pragma count: "
860 << PInfo.PragmaCount << ".\n");
861 return PInfo.PragmaCount;
862 }
864 << "Not unrolling with pragma count " << PInfo.PragmaCount
865 << ": remainder not allowed, count does not divide trip "
866 << "multiple " << TripMultiple << ".\n");
867 ORE->emit([&]() {
868 return OptimizationRemarkAnalysis(DEBUG_TYPE, "PragmaUnrollCountRejected",
869 L->getStartLoc(), L->getHeader())
870 << "may be unable to unroll loop with count "
871 << ore::NV("PragmaCount", PInfo.PragmaCount)
872 << ": remainder loop is not allowed and count does not divide "
873 "trip multiple "
874 << ore::NV("TripMultiple", TripMultiple);
875 });
876 }
877
878 if (PInfo.PragmaFullUnroll) {
879 if (TripCount != 0) {
880 // Certain cases with UBSAN can cause trip count to be calculated as
881 // INT_MAX, Block full unrolling at a reasonable limit so that the
882 // compiler doesn't hang trying to unroll the loop. See PR77842
883 if (TripCount > PragmaUnrollFullMaxIterations) {
885 << "Won't unroll; trip count is too large.\n");
886 ORE->emit([&]() {
888 "PragmaFullUnrollTripCountTooLarge",
889 L->getStartLoc(), L->getHeader())
890 << "may be unable to fully unroll loop: trip count "
891 << ore::NV("TripCount", TripCount) << " exceeds limit "
893 });
894 return std::nullopt;
895 }
896
898 << "Fully unrolling with trip count: " << TripCount << ".\n");
899 return TripCount;
900 }
902 << "Not fully unrolling: unknown trip count.\n");
903 ORE->emit([&]() {
905 "PragmaFullUnrollUnknownTripCount",
906 L->getStartLoc(), L->getHeader())
907 << "may be unable to fully unroll loop: trip count is unknown";
908 });
909 }
910
911 if (PInfo.PragmaEnableUnroll && !TripCount && MaxTripCount &&
912 MaxTripCount <= UP.MaxUpperBound) {
914 << "Unrolling with max trip count: " << MaxTripCount << ".\n");
915 return MaxTripCount;
916 }
917
918 return std::nullopt;
919}
920
// Decide whether to fully unroll the loop by exactly FullUnrollTripCount
// iterations. Returns the count on success, std::nullopt to decline.
// A loop is fully unrolled when (a) the count is within UP.FullUnrollMaxCount
// and (b) either the estimated unrolled size is under UP.Threshold, or the
// deeper cost analysis shows enough instructions simplify away to fit under a
// boosted threshold.
// NOTE(review): this listing is a doxygen extraction; several original lines
// (parts of the parameter list and some LLVM_DEBUG statement heads) are
// missing here — compare against upstream LoopUnrollPass.cpp before editing.
921 static std::optional<unsigned> shouldFullUnroll(
924     const unsigned FullUnrollTripCount, const UnrollCostEstimator UCE,
926   assert(FullUnrollTripCount && "should be non-zero!");
927
// Hard cap: never fully unroll past UP.FullUnrollMaxCount.
928   if (FullUnrollTripCount > UP.FullUnrollMaxCount) {
930                         << "Not unrolling: trip count " << FullUnrollTripCount
931                         << " exceeds max count " << UP.FullUnrollMaxCount << ".\n");
932     return std::nullopt;
933   }
934
935   // When computing the unrolled size, note that BEInsns are not replicated
936   // like the rest of the loop body.
937   uint64_t UnrolledSize = UCE.getUnrolledLoopSize(UP, FullUnrollTripCount);
// Fast path: small enough that no per-iteration cost analysis is needed.
938   if (UnrolledSize < UP.Threshold) {
939     LLVM_DEBUG(dbgs().indent(2) << "Unrolling: size " << UnrolledSize
940                                 << " < threshold " << UP.Threshold << ".\n");
941     return FullUnrollTripCount;
942   }
943
945                         << "Unrolled size " << UnrolledSize << " exceeds threshold "
946                         << UP.Threshold << "; checking for cost benefit.\n");
947
948   // The loop isn't that small, but we still can fully unroll it if that
949   // helps to remove a significant number of instructions.
950   // To check that, run additional analysis on the loop.
951   if (std::optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost(
952           L, FullUnrollTripCount, DT, SE, EphValues, TTI,
// Boost (a percentage >= 100) raises the threshold in proportion to how much
// the analysis expects to simplify; the Boost computation line itself was
// dropped by the extraction.
955     unsigned Boost =
957     unsigned BoostedThreshold = UP.Threshold * Boost / 100;
958     if (Cost->UnrolledCost < BoostedThreshold) {
959       LLVM_DEBUG(dbgs().indent(2) << "Profitable after cost analysis.\n");
960       return FullUnrollTripCount;
961     }
963                         << "Not unrolling: cost " << Cost->UnrolledCost
964                         << " >= boosted threshold " << BoostedThreshold << ".\n");
965   }
966
967   return std::nullopt;
968 }
969
// Decide whether (and by how much) to partially unroll a loop whose exact
// TripCount is known. Returns std::nullopt when TripCount is unknown, 0 to
// decline, or the chosen unroll count. The count is shrunk to respect
// UP.PartialThreshold and UP.MaxCount and, preferably, to divide TripCount
// so no remainder loop is needed.
// NOTE(review): doxygen extraction; some original lines (the UP parameter
// line and a few LLVM_DEBUG statement heads, plus part of the power-of-two
// reduction condition) are missing from this listing.
970 static std::optional<unsigned>
971 shouldPartialUnroll(const unsigned LoopSize, const unsigned TripCount,
972                     const UnrollCostEstimator UCE,
974
// Partial unrolling only applies when the trip count is statically known.
975   if (!TripCount)
976     return std::nullopt;
977
978   if (!UP.Partial) {
979     LLVM_DEBUG(dbgs().indent(2) << "Will not try to unroll partially because "
980                                 << "-unroll-allow-partial not given\n");
981     return 0;
982   }
// Start from an explicitly requested count, else try the full trip count.
983   unsigned count = UP.Count;
984   if (count == 0)
985     count = TripCount;
986   if (UP.PartialThreshold != NoThreshold) {
987     // Reduce unroll count to be modulo of TripCount for partial unrolling.
988     if (UCE.getUnrolledLoopSize(UP, count) > UP.PartialThreshold) {
989       unsigned NewCount =
990           (std::max(UP.PartialThreshold, UP.BEInsns + 1) - UP.BEInsns) /
991           (LoopSize - UP.BEInsns);
993                  << "Unrolled size exceeds threshold; reducing count "
994                  << "from " << count << " to " << NewCount << ".\n");
995       count = NewCount;
996     }
997     if (count > UP.MaxCount)
998       count = UP.MaxCount;
// Prefer a count that evenly divides TripCount (no remainder loop needed).
999     while (count != 0 && TripCount % count != 0)
1000       count--;
1001     if (UP.AllowRemainder && count <= 1) {
1002       // If there is no Count that is modulo of TripCount, set Count to
1003       // largest power-of-two factor that satisfies the threshold limit.
1004       // As we'll create fixup loop, do the type of unrolling only if
1005       // remainder loop is allowed.
1006       // Note: DefaultUnrollRuntimeCount is used as a reasonable starting point
1007       // even though this is partial unrolling (not runtime unrolling).
1009       while (count != 0 &&
1011         count >>= 1;
1012     }
1013     if (count < 2) {
1015                  << "Will not partially unroll: no profitable count.\n");
1016       count = 0;
1017     }
1018   } else {
// No size threshold configured: unroll by the full trip count.
1019     count = TripCount;
1020   }
1021   if (count > UP.MaxCount)
1022     count = UP.MaxCount;
1023
1025              << "Partially unrolling with count: " << count << "\n");
1026
1027   return count;
1028 }
1029// Calculates unroll count and writes it to UP.Count.
1030// Unless IgnoreUser is true, will also use metadata and command-line options
1031// that are specific to the LoopUnroll pass (which, for instance, are
1032// irrelevant for the LoopUnrollAndJam pass).
1033// FIXME: This function is used by LoopUnroll and LoopUnrollAndJam, but consumes
1034// many LoopUnroll-specific options. The shared functionality should be
1035// refactored into it own function.
//
// Strategy priority order (each "return" below commits to one strategy):
//   1-2. explicit count from -unroll-count / pragma, 3. exact full unroll,
//   4. upper-bound (bounded) unroll, 5. peeling, 6. partial unroll,
//   7. runtime unroll. UP.Count == 0 on exit means "do not unroll".
// NOTE(review): doxygen extraction; the signature's first line and several
// LLVM_DEBUG statement heads are missing from this listing — compare
// against upstream LoopUnrollPass.cpp before editing.
1037                                DominatorTree &DT, LoopInfo *LI,
1039                                const SmallPtrSetImpl<const Value *> &EphValues,
1041                                const unsigned TripCount,
1042                                const unsigned MaxTripCount, const bool MaxOrZero,
1043                                const unsigned TripMultiple,
1044                                const UnrollCostEstimator &UCE,
1047
1048   unsigned LoopSize = UCE.getRolledLoopSize();
1049
1050   LLVM_DEBUG(dbgs().indent(1) << "Computing unroll count: TripCount="
1051                               << TripCount << ", MaxTripCount=" << MaxTripCount
1052                               << (MaxOrZero ? " (MaxOrZero)" : "")
1053                               << ", TripMultiple=" << TripMultiple << "\n");
1054
// Gather any user-specified unroll pragmas/options for this loop.
1055   UnrollPragmaInfo PInfo(L);
1056   LLVM_DEBUG({
1057     if (PInfo.ExplicitUnroll) {
1058       dbgs().indent(1) << "Explicit unroll requested:";
1059       if (PInfo.UserUnrollCount)
1060         dbgs() << " user-count";
1061       if (PInfo.PragmaFullUnroll)
1062         dbgs() << " pragma-full";
1063       if (PInfo.PragmaCount > 0)
1064         dbgs() << " pragma-count(" << PInfo.PragmaCount << ")";
1065       if (PInfo.PragmaEnableUnroll)
1066         dbgs() << " pragma-enable";
1067       dbgs() << "\n";
1068     }
1069   });
1070
1071   // Use an explicit peel count that has been specified for testing. In this
1072   // case it's not permitted to also specify an explicit unroll count.
1073   if (PP.PeelCount) {
1074     if (UnrollCount.getNumOccurrences() > 0) {
1075       reportFatalUsageError("Cannot specify both explicit peel count and "
1076                             "explicit unroll count");
1077     }
1079                << "Using explicit peel count: " << PP.PeelCount << ".\n");
// UP.Count == 1 with a PeelCount means "peel only, do not unroll".
1080     UP.Count = 1;
1081     UP.Runtime = false;
1082     return;
1083   }
1084
1085   // If a user provided an explicit unroll pragma (with or without count),
1086   // enable runtime unrolling and override expensive trip count checks.
1087   if (PInfo.PragmaEnableUnroll || PInfo.PragmaCount > 0) {
1088     UP.AllowExpensiveTripCount = true;
1089     UP.Runtime = true;
1090   }
1091
1092   // Check for explicit Count.
1093   // 1st priority is unroll count set by "unroll-count" option.
1094   // 2nd priority is unroll count set by pragma.
1095   LLVM_DEBUG(dbgs().indent(1) << "Trying pragma unroll...\n");
1096   if (auto UnrollFactor = shouldPragmaUnroll(L, PInfo, TripMultiple, TripCount,
1097                                              MaxTripCount, UCE, UP, ORE)) {
1098     UP.Count = *UnrollFactor;
1099
1100     if (PInfo.UserUnrollCount || (PInfo.PragmaCount > 0)) {
1101       UP.AllowExpensiveTripCount = true;
1102       UP.Force = true;
1103     }
1104     return;
1105   } else {
1106     if (PInfo.ExplicitUnroll && TripCount != 0) {
1107       // If the loop has an unrolling pragma, we want to be more aggressive with
1108       // unrolling limits. Set thresholds to at least the PragmaUnrollThreshold
1109       // value which is larger than the default limits.
1110       UP.Threshold = std::max<unsigned>(UP.Threshold, PragmaUnrollThreshold);
1111       UP.PartialThreshold =
1112           std::max<unsigned>(UP.PartialThreshold, PragmaUnrollThreshold);
1113     }
1114   }
1115
1116   // 3rd priority is exact full unrolling. This will eliminate all copies
1117   // of some exit test.
1118   LLVM_DEBUG(dbgs().indent(1) << "Trying full unroll...\n");
1119   assert(UP.Count == 0);
1120   if (TripCount) {
1121     if (auto UnrollFactor = shouldFullUnroll(L, TTI, DT, SE, EphValues,
1122                                              TripCount, UCE, UP)) {
1123       UP.Count = *UnrollFactor;
1124       return;
1125     }
1126   }
1127
1128   // 4th priority is bounded unrolling.
1129   // We can unroll by the upper bound amount if it's generally allowed or if
1130   // we know that the loop is executed either the upper bound or zero times.
1131   // (MaxOrZero unrolling keeps only the first loop test, so the number of
1132   // loop tests remains the same compared to the non-unrolled version, whereas
1133   // the generic upper bound unrolling keeps all but the last loop test so the
1134   // number of loop tests goes up which may end up being worse on targets with
1135   // constrained branch predictor resources so is controlled by an option.)
1136   // In addition we only unroll small upper bounds.
1137   // Note that the cost of bounded unrolling is always strictly greater than
1138   // cost of exact full unrolling. As such, if we have an exact count and
1139   // found it unprofitable, we'll never chose to bounded unroll.
1140   LLVM_DEBUG(dbgs().indent(1) << "Trying upper-bound unroll...\n");
1141   if (!TripCount && MaxTripCount && (UP.UpperBound || MaxOrZero) &&
1142       MaxTripCount <= UP.MaxUpperBound) {
1143     if (auto UnrollFactor = shouldFullUnroll(L, TTI, DT, SE, EphValues,
1144                                              MaxTripCount, UCE, UP)) {
1145       UP.Count = *UnrollFactor;
1146       return;
1147     }
1148   }
1149
1150   // 5th priority is loop peeling.
1151   LLVM_DEBUG(dbgs().indent(1) << "Trying loop peeling...\n");
1152   computePeelCount(L, LoopSize, PP, TripCount, DT, SE, TTI, AC, UP.Threshold);
1153   if (PP.PeelCount) {
1155                << "Peeling with count: " << PP.PeelCount << ".\n");
1156     UP.Runtime = false;
1157     UP.Count = 1;
1158     return;
1159   }
1160
1161   // Before starting partial unrolling, set up.partial to true,
1162   // if user explicitly asked for unrolling
1163   if (TripCount)
1164     UP.Partial |= PInfo.ExplicitUnroll;
1165
1166   // 6th priority is partial unrolling.
1167   // Try partial unroll only when TripCount could be statically calculated.
1168   LLVM_DEBUG(dbgs().indent(1) << "Trying partial unroll...\n");
1169   if (auto UnrollFactor = shouldPartialUnroll(LoopSize, TripCount, UCE, UP)) {
1170     UP.Count = *UnrollFactor;
1171     return;
1172   }
1173   assert(TripCount == 0 &&
1174          "All cases when TripCount is constant should be covered here.");
1175
1176   // 7th priority is runtime unrolling.
1177   LLVM_DEBUG(dbgs().indent(1) << "Trying runtime unroll...\n");
1178   // Don't unroll a runtime trip count loop when it is disabled.
1179   if (PInfo.PragmaRuntimeUnrollDisable) {
1181                << "Not runtime unrolling: disabled by pragma.\n");
1182     return;
1183   }
1184
1185   // Don't unroll a small upper bound loop unless user or TTI asked to do so.
1186   if (MaxTripCount && !UP.Force && MaxTripCount <= UP.MaxUpperBound) {
1187     LLVM_DEBUG(dbgs().indent(2) << "Not runtime unrolling: max trip count "
1188                                 << MaxTripCount << " is small (<= "
1189                                 << UP.MaxUpperBound << ") and not forced.\n");
1190     return;
1191   }
1192
1193   // Check if the runtime trip count is too small when profile is available.
1194   if (L->getHeader()->getParent()->hasProfileData()) {
1195     if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
// "Flat" loops (profiled trip count below the threshold) are not worth the
// runtime-unroll prologue cost; give up quietly.
1196       if (*ProfileTripCount < FlatLoopTripCountThreshold)
1197         return;
1198       else
1199         UP.AllowExpensiveTripCount = true;
1200     }
1201   }
1202   if (!UP.Runtime) {
1204                << "Will not try to unroll loop with runtime trip count "
1205                << "because -unroll-runtime not given\n");
1206     return;
1207   }
1208
1209   assert(UP.Count == 0);
1211
1212   // Reduce unroll count to be the largest power-of-two factor of
1213   // the original count which satisfies the threshold limit.
1214   while (UP.Count != 0 &&
1216     UP.Count >>= 1;
1217
1218 #ifndef NDEBUG
1219   unsigned OrigCount = UP.Count;
1220 #endif
1221
// Without a remainder loop, the unroll count must evenly divide the trip
// multiple; halve until it does.
1222   if (!UP.AllowRemainder && UP.Count != 0 && (TripMultiple % UP.Count) != 0) {
1223     while (UP.Count != 0 && TripMultiple % UP.Count != 0)
1224       UP.Count >>= 1;
1226         << "Remainder loop is restricted (that could be architecture "
1227            "specific or because the loop contains a convergent "
1228            "instruction), so unroll count must divide the trip "
1229            "multiple, "
1230         << TripMultiple << ". Reducing unroll count from " << OrigCount
1231         << " to " << UP.Count << ".\n");
1232   }
1233
1234   if (UP.Count > UP.MaxCount)
1235     UP.Count = UP.MaxCount;
1236
1237   if (MaxTripCount && UP.Count > MaxTripCount)
1238     UP.Count = MaxTripCount;
1239
// A count of 0 or 1 means "no unrolling"; normalize to 0.
1240   if (UP.Count < 2)
1241     UP.Count = 0;
1242   else
1244                << "Runtime unrolling with count: " << UP.Count << "\n");
1245   return;
1246 }
1247
// Attempt to unroll (or peel) loop L. This is the single driver shared by the
// legacy pass, LoopFullUnrollPass and LoopUnrollPass: it performs all
// legality/profitability gating, asks computeUnrollCount() for a strategy,
// then either peels via peelLoop() or unrolls via UnrollLoop(), emitting
// optimization remarks for forced loops that could not be transformed.
// Returns the LoopUnrollResult from UnrollLoop() (or an early result when
// gating rejects the loop).
// NOTE(review): doxygen extraction; the first signature lines, several early
// "return LoopUnrollResult::Unmodified;" lines, the ULO declaration line and
// a number of LLVM_DEBUG heads are missing from this listing — compare
// against upstream LoopUnrollPass.cpp before editing.
1248static LoopUnrollResult
1252                 ProfileSummaryInfo *PSI, bool PreserveLCSSA, int OptLevel,
1253                 bool OnlyFullUnroll, bool OnlyWhenForced, bool ForgetAllSCEV,
1254                 std::optional<unsigned> ProvidedCount,
1255                 std::optional<unsigned> ProvidedThreshold,
1256                 std::optional<bool> ProvidedAllowPartial,
1257                 std::optional<bool> ProvidedRuntime,
1258                 std::optional<bool> ProvidedUpperBound,
1259                 std::optional<bool> ProvidedAllowPeeling,
1260                 std::optional<bool> ProvidedAllowProfileBasedPeeling,
1261                 std::optional<unsigned> ProvidedFullUnrollMaxCount,
1262                 UniformityInfo *UI = nullptr, AAResults *AA = nullptr) {
1263
1264   LLVM_DEBUG(dbgs() << "Loop Unroll: F["
1265                     << L->getHeader()->getParent()->getName() << "] Loop %"
1266                     << L->getHeader()->getName()
1267                     << " (depth=" << L->getLoopDepth() << ")\n");
// Honor llvm.loop.unroll.disable-style metadata (TM carries the
// transformation mode derived from loop metadata).
1269   if (TM & TM_Disable) {
1270     LLVM_DEBUG(dbgs().indent(1) << "Not unrolling: transformation disabled by "
1271                                 << "metadata.\n");
1273   }
1274
1275   // If this loop isn't forced to be unrolled, avoid unrolling it when the
1276   // parent loop has an explicit unroll-and-jam pragma. This is to prevent
1277   // automatic unrolling from interfering with the user requested
1278   // transformation.
1279   Loop *ParentL = L->getParentLoop();
1280   if (ParentL != nullptr &&
1283     LLVM_DEBUG(dbgs().indent(1) << "Not unrolling loop since parent loop has"
1284                                 << " llvm.loop.unroll_and_jam.\n");
1286   }
1287
1288   // If this loop isn't forced to be unrolled, avoid unrolling it when the
1289   // loop has an explicit unroll-and-jam pragma. This is to prevent automatic
1290   // unrolling from interfering with the user requested transformation.
1293     LLVM_DEBUG(
1294         dbgs().indent(1)
1295         << "Not unrolling loop since it has llvm.loop.unroll_and_jam.\n");
1297   }
1298
1299   if (!L->isLoopSimplifyForm()) {
1301                << "Not unrolling loop which is not in loop-simplify form.\n");
1302     if (TM & TM_ForcedByUser) {
1303       ORE.emit([&]() {
1304         return OptimizationRemarkMissed(DEBUG_TYPE, "NotInLoopSimplifyForm",
1305                                         L->getStartLoc(), L->getHeader())
1306                << "unable to unroll loop: not in loop-simplify form";
1307       });
1308     }
1310   }
1311
1312   // When automatic unrolling is disabled, do not unroll unless overridden for
1313   // this loop.
1314   if (OnlyWhenForced && !(TM & TM_Enable)) {
1315     LLVM_DEBUG(dbgs().indent(1) << "Not unrolling: automatic unrolling "
1316                                 << "disabled and loop not explicitly "
1317                                 << "enabled.\n");
1319   }
1320
// Gather target/user preferences for unrolling and peeling (the UP and PP
// declaration heads were dropped by the extraction).
1321   bool OptForSize = L->getHeader()->getParent()->hasOptSize();
1323       L, SE, TTI, BFI, PSI, ORE, OptLevel, ProvidedThreshold, ProvidedCount,
1324       ProvidedAllowPartial, ProvidedRuntime, ProvidedUpperBound,
1325       ProvidedFullUnrollMaxCount);
1327       L, SE, TTI, ProvidedAllowPeeling, ProvidedAllowProfileBasedPeeling, true);
1328
1329   // Exit early if unrolling is disabled. For OptForSize, we pick the loop size
1330   // as threshold later on.
1331   if (UP.Threshold == 0 && (!UP.Partial || UP.PartialThreshold == 0) &&
1332       !OptForSize) {
1333     LLVM_DEBUG(dbgs().indent(1) << "Not unrolling: all thresholds are zero.\n");
1334     if (TM & TM_ForcedByUser) {
1335       ORE.emit([&]() {
1336         return OptimizationRemarkMissed(DEBUG_TYPE, "UnrollThresholdsZero",
1337                                         L->getStartLoc(), L->getHeader())
1338                << "unable to unroll loop: unroll threshold is zero";
1339       });
1340     }
1342   }
1343
// Ephemeral values (only feeding llvm.assume etc.) are excluded from sizing.
1345   CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
1346
1347   // Check if the backedge-taken count is uniform before constructing UCE.
1348   // This is used to allow runtime unrolling with a remainder for convergent
1349   // loops when all threads agree on the trip count.
1350   const SCEV *BTC = SE.getBackedgeTakenCount(L);
1351   bool TripCountIsUniform = UI && isSCEVUniform(BTC, *UI);
1352   UnrollCostEstimator UCE(L, TTI, EphValues, UP.BEInsns, TripCountIsUniform);
1353   if (!UCE.canUnroll((TM & TM_ForcedByUser) ? &ORE : nullptr, L))
1355
1356   unsigned LoopSize = UCE.getRolledLoopSize();
1357   LLVM_DEBUG(dbgs() << "Loop Size = " << LoopSize << "\n");
1358
1359   // When optimizing for size, use LoopSize + 1 as threshold (we use < Threshold
1360   // later), to (fully) unroll loops, if it does not increase code size.
1361   if (OptForSize)
1362     UP.Threshold = std::max(UP.Threshold, LoopSize + 1);
1363
1364   if (UCE.NumInlineCandidates != 0) {
1366                << "Not unrolling loop with inlinable calls.\n");
1367     if (TM & TM_ForcedByUser) {
1368       ORE.emit([&]() {
1370                                         "InlineCandidatesPreventUnroll",
1371                                         L->getStartLoc(), L->getHeader())
1372                << "unable to unroll loop: contains inlinable calls";
1373       });
1374     }
1376   }
1377
1378   // Find the smallest exact trip count for any exit. This is an upper bound
1379   // on the loop trip count, but an exit at an earlier iteration is still
1380   // possible. An unroll by the smallest exact trip count guarantees that all
1381   // branches relating to at least one exit can be eliminated. This is unlike
1382   // the max trip count, which only guarantees that the backedge can be broken.
1383   unsigned TripCount = 0;
1384   unsigned TripMultiple = 1;
1385   SmallVector<BasicBlock *, 8> ExitingBlocks;
1386   L->getExitingBlocks(ExitingBlocks);
1387   for (BasicBlock *ExitingBlock : ExitingBlocks)
1388     if (unsigned TC = SE.getSmallConstantTripCount(L, ExitingBlock))
1389       if (!TripCount || TC < TripCount)
1390         TripCount = TripMultiple = TC;
1391
1392   if (!TripCount) {
1393     // If no exact trip count is known, determine the trip multiple of either
1394     // the loop latch or the single exiting block.
1395     // TODO: Relax for multiple exits.
1396     BasicBlock *ExitingBlock = L->getLoopLatch();
1397     if (!ExitingBlock || !L->isLoopExiting(ExitingBlock))
1398       ExitingBlock = L->getExitingBlock();
1399     if (ExitingBlock)
1400       TripMultiple = SE.getSmallConstantTripMultiple(L, ExitingBlock);
1401   }
1402
1403   // If the loop contains a convergent operation, the prelude we'd add
1404   // to do the first few instructions before we hit the unrolled loop
1405   // is unsafe -- it adds a control-flow dependency to the convergent
1406   // operation. Therefore restrict remainder loop (try unrolling without).
1408
1409   // Try to find the trip count upper bound if we cannot find the exact trip
1410   // count.
1411   unsigned MaxTripCount = 0;
1412   bool MaxOrZero = false;
1413   if (!TripCount) {
1414     MaxTripCount = SE.getSmallConstantMaxTripCount(L);
1415     MaxOrZero = SE.isBackedgeTakenCountMaxOrZero(L);
1416   }
1417
1418   // computeUnrollCount() decides whether it is beneficial to use upper bound to
1419   // fully unroll the loop.
1420   computeUnrollCount(L, TTI, DT, LI, &AC, SE, EphValues, &ORE, TripCount,
1421                      MaxTripCount, MaxOrZero, TripMultiple, UCE, UP, PP);
1422   if (!UP.Count) {
1424                << "Not unrolling: no viable strategy found.\n");
1425     if (TM & TM_ForcedByUser) {
1426       ORE.emit([&]() {
1427         return OptimizationRemarkMissed(DEBUG_TYPE, "NoUnrollStrategy",
1428                                         L->getStartLoc(), L->getHeader())
1429                << "unable to unroll loop: no viable unroll count found";
1430       });
1431     }
1433   }
1434
1436
// PeelCount set means computeUnrollCount() chose peeling (UP.Count == 1):
// peel, simplify, and mark the loop so it is not unrolled/peeled again.
1437   if (PP.PeelCount) {
1438     assert(UP.Count == 1 && "Cannot perform peel and unroll in the same step");
1439     LLVM_DEBUG(dbgs() << "PEELING loop %" << L->getHeader()->getName()
1440                       << " with iteration count " << PP.PeelCount << "!\n");
1441     ORE.emit([&]() {
1442       return OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
1443                                 L->getHeader())
1444              << "peeled loop by " << ore::NV("PeelCount", PP.PeelCount)
1445              << " iterations";
1446     });
1447
1448     ValueToValueMapTy VMap;
1449     peelLoop(L, PP.PeelCount, PP.PeelLast, LI, &SE, DT, &AC, PreserveLCSSA,
1450              VMap);
1451     simplifyLoopAfterUnroll(L, true, LI, &SE, &DT, &AC, &TTI, L->getBlocks(),
1452                             nullptr);
1453     // If the loop was peeled, we already "used up" the profile information
1454     // we had, so we don't want to unroll or peel again.
1456       L->setLoopAlreadyUnrolled();
1458   }
1459
1460   // Do not attempt partial/runtime unrolling in FullLoopUnrolling
1461   if (OnlyFullUnroll && ((!TripCount && !MaxTripCount) ||
1462                          UP.Count < TripCount || UP.Count < MaxTripCount)) {
1464                << "Not attempting partial/runtime unroll in FullLoopUnroll.\n");
1466   }
1467
1468   // At this point, UP.Runtime indicates that run-time unrolling is allowed.
1469   // However, we only want to actually perform it if we don't know the trip
1470   // count and the unroll count doesn't divide the known trip multiple.
1471   // TODO: This decision should probably be pushed up into
1472   // computeUnrollCount().
1473   UP.Runtime &= TripCount == 0 && TripMultiple % UP.Count != 0;
1474
1475   // Save loop properties before it is transformed.
1476   MDNode *OrigLoopID = L->getLoopID();
1477   UnrollPragmaInfo PInfo(L);
1478   DebugLoc LoopStartLoc = L->getStartLoc();
1479   BasicBlock *LoopHeader = L->getHeader();
1480
1481   // Unroll the loop.
1482   Loop *RemainderLoop = nullptr;
1484   ULO.Count = UP.Count;
1485   ULO.Force = UP.Force;
1488   ULO.Runtime = UP.Runtime;
1489   ULO.ForgetAllSCEV = ForgetAllSCEV;
1494   LoopUnrollResult UnrollResult = UnrollLoop(
1495       L, ULO, LI, &SE, &DT, &AC, &TTI, &ORE, PreserveLCSSA, &RemainderLoop, AA);
1496   if (UnrollResult == LoopUnrollResult::Unmodified) {
1497     if (PInfo.ExplicitUnroll) {
1499                  << "Failed to unroll loop as explicitly requested.\n");
1500       ORE.emit([&]() {
1501         return OptimizationRemarkMissed(DEBUG_TYPE, "FailedToUnrollAsRequested",
1502                                         LoopStartLoc, LoopHeader)
1503                << "failed to unroll loop as explicitly requested";
1504       });
1505     }
1507   }
1508
// Report when the actual unroll factor fell short of what a pragma asked for.
1509   if (PInfo.PragmaFullUnroll && ULO.Count != TripCount) {
1510     ORE.emit([&]() {
1511       return OptimizationRemarkMissed(DEBUG_TYPE, "FullUnrollAsDirectedFailed",
1512                                       LoopStartLoc, LoopHeader)
1513              << "unable to fully unroll loop as directed; "
1514              << "unrolled by factor " << ore::NV("UnrollCount", ULO.Count);
1515     });
1516   }
1517   if (PInfo.PragmaCount > 0 && ULO.Count != PInfo.PragmaCount) {
1518     ORE.emit([&]() {
1519       return OptimizationRemarkMissed(DEBUG_TYPE, "UnrollCountDiffers",
1520                                       LoopStartLoc, LoopHeader)
1521              << "unable to unroll loop with requested count "
1522              << ore::NV("RequestedCount", PInfo.PragmaCount)
1523              << "; unrolled by factor " << ore::NV("UnrollCount", ULO.Count);
1524     });
1525   }
1526
// Propagate follow-up metadata onto the remainder loop and the main loop
// (the makeFollowupLoopID call lines were dropped by the extraction).
1527   if (RemainderLoop) {
1528     std::optional<MDNode *> RemainderLoopID =
1531     if (RemainderLoopID)
1532       RemainderLoop->setLoopID(*RemainderLoopID);
1533   }
1534
1535   if (UnrollResult != LoopUnrollResult::FullyUnrolled) {
1536     std::optional<MDNode *> NewLoopID =
1539     if (NewLoopID) {
1540       L->setLoopID(*NewLoopID);
1541
1542       // Do not setLoopAlreadyUnrolled if loop attributes have been specified
1543       // explicitly.
1544       return UnrollResult;
1545     }
1546   }
1547
1548   // If loop has an unroll count pragma or unrolled by explicitly set count
1549   // mark loop as unrolled to prevent unrolling beyond that requested.
1550   if (UnrollResult != LoopUnrollResult::FullyUnrolled && PInfo.ExplicitUnroll)
1551     L->setLoopAlreadyUnrolled();
1552
1553   return UnrollResult;
1554 }
1555
1556namespace {
1557
// Legacy pass-manager wrapper around tryToUnrollLoop(). The Provided*
// members carry command-line/constructor overrides forwarded verbatim;
// std::nullopt means "use the default from target/pass preferences".
// NOTE(review): doxygen extraction; a few lines (the initialize* call in the
// constructor body, the UniformityInfo condition, the domtree
// getLoopAnalysisUsage line) are missing from this listing.
1558class LoopUnroll : public LoopPass {
1559public:
1560   static char ID; // Pass ID, replacement for typeid
1561
1562   int OptLevel;
1563
1564   /// If false, use a cost model to determine whether unrolling of a loop is
1565   /// profitable. If true, only loops that explicitly request unrolling via
1566   /// metadata are considered. All other loops are skipped.
1567   bool OnlyWhenForced;
1568
1569   /// If false, when SCEV is invalidated, only forget everything in the
1570   /// top-most loop (call forgetTopMostLoop), of the loop being processed.
1571   /// Otherwise, forgetAllLoops and rebuild when needed next.
1572   bool ForgetAllSCEV;
1573
1574   std::optional<unsigned> ProvidedCount;
1575   std::optional<unsigned> ProvidedThreshold;
1576   std::optional<bool> ProvidedAllowPartial;
1577   std::optional<bool> ProvidedRuntime;
1578   std::optional<bool> ProvidedUpperBound;
1579   std::optional<bool> ProvidedAllowPeeling;
1580   std::optional<bool> ProvidedAllowProfileBasedPeeling;
1581   std::optional<unsigned> ProvidedFullUnrollMaxCount;
1582
1583   LoopUnroll(int OptLevel = 2, bool OnlyWhenForced = false,
1584              bool ForgetAllSCEV = false,
1585              std::optional<unsigned> Threshold = std::nullopt,
1586              std::optional<unsigned> Count = std::nullopt,
1587              std::optional<bool> AllowPartial = std::nullopt,
1588              std::optional<bool> Runtime = std::nullopt,
1589              std::optional<bool> UpperBound = std::nullopt,
1590              std::optional<bool> AllowPeeling = std::nullopt,
1591              std::optional<bool> AllowProfileBasedPeeling = std::nullopt,
1592              std::optional<unsigned> ProvidedFullUnrollMaxCount = std::nullopt)
1593       : LoopPass(ID), OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced),
1594         ForgetAllSCEV(ForgetAllSCEV), ProvidedCount(std::move(Count)),
1595         ProvidedThreshold(Threshold), ProvidedAllowPartial(AllowPartial),
1596         ProvidedRuntime(Runtime), ProvidedUpperBound(UpperBound),
1597         ProvidedAllowPeeling(AllowPeeling),
1598         ProvidedAllowProfileBasedPeeling(AllowProfileBasedPeeling),
1599         ProvidedFullUnrollMaxCount(ProvidedFullUnrollMaxCount) {
1601   }
1602
// Fetch the analyses tryToUnrollLoop() needs, delegate, and tell the pass
// manager when the loop was deleted by full unrolling.
1603   bool runOnLoop(Loop *L, LPPassManager &LPM) override {
1604     if (skipLoop(L))
1605       return false;
1606
1607     Function &F = *L->getHeader()->getParent();
1608
1609     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1610     LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1611     ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1612     const TargetTransformInfo &TTI =
1613         getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1614     auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1615     UniformityInfo *UI =
1617             ? &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo()
1618             : nullptr;
1619     // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
1620     // pass. Function analyses need to be preserved across loop transformations
1621     // but ORE cannot be preserved (see comment before the pass definition).
1622     OptimizationRemarkEmitter ORE(&F);
1623     bool PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
1624
1626         L, DT, LI, SE, TTI, AC, ORE, nullptr, nullptr, PreserveLCSSA, OptLevel,
1627         /*OnlyFullUnroll*/ false, OnlyWhenForced, ForgetAllSCEV, ProvidedCount,
1628         ProvidedThreshold, ProvidedAllowPartial, ProvidedRuntime,
1629         ProvidedUpperBound, ProvidedAllowPeeling,
1630         ProvidedAllowProfileBasedPeeling, ProvidedFullUnrollMaxCount, UI);
1631
1632     if (Result == LoopUnrollResult::FullyUnrolled)
1633       LPM.markLoopAsDeleted(*L);
1634
1635     return Result != LoopUnrollResult::Unmodified;
1636   }
1637
1638   /// This transformation requires natural loop information & requires that
1639   /// loop preheaders be inserted into the CFG...
1640   void getAnalysisUsage(AnalysisUsage &AU) const override {
1641     AU.addRequired<AssumptionCacheTracker>();
1642     AU.addRequired<TargetTransformInfoWrapperPass>();
1643     AU.addRequired<UniformityInfoWrapperPass>();
1644     // FIXME: Loop passes are required to preserve domtree, and for now we just
1645     // recreate dom info if anything gets unrolled.
1647   }
1648};
1649
1650} // end anonymous namespace
1651
// Legacy pass registration. NOTE(review): the INITIALIZE_PASS_DEPENDENCY
// lines between BEGIN and END were dropped by the doxygen extraction.
1652char LoopUnroll::ID = 0;
1653
1654INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
1659INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
1660
1661Pass *llvm::createLoopUnrollPass(int OptLevel, bool OnlyWhenForced,
1662 bool ForgetAllSCEV, int Threshold, int Count,
1663 int AllowPartial, int Runtime, int UpperBound,
1664 int AllowPeeling) {
1665 // TODO: It would make more sense for this function to take the optionals
1666 // directly, but that's dangerous since it would silently break out of tree
1667 // callers.
1668 return new LoopUnroll(
1669 OptLevel, OnlyWhenForced, ForgetAllSCEV,
1670 Threshold == -1 ? std::nullopt : std::optional<unsigned>(Threshold),
1671 Count == -1 ? std::nullopt : std::optional<unsigned>(Count),
1672 AllowPartial == -1 ? std::nullopt : std::optional<bool>(AllowPartial),
1673 Runtime == -1 ? std::nullopt : std::optional<bool>(Runtime),
1674 UpperBound == -1 ? std::nullopt : std::optional<bool>(UpperBound),
1675 AllowPeeling == -1 ? std::nullopt : std::optional<bool>(AllowPeeling));
1676}
1677
// New-PM full-unroll-only pass body: run tryToUnrollLoop() in
// full-unroll-only mode, then tell the LPMUpdater which loops are new
// siblings/children and whether the current loop was deleted.
// NOTE(review): doxygen extraction; the signature's first lines and a few
// body lines (e.g. the result comparison operand after "!=" and the
// getPreservedAnalyses-style return) are missing from this listing.
1680                                       LPMUpdater &Updater) {
1681   // For the new PM, we can't use OptimizationRemarkEmitter as an analysis
1682   // pass. Function analyses need to be preserved across loop transformations
1683   // but ORE cannot be preserved (see comment before the pass definition).
1684   OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
1685
1686   // Keep track of the previous loop structure so we can identify new loops
1687   // created by unrolling.
1688   Loop *ParentL = L.getParentLoop();
1689   SmallPtrSet<Loop *, 4> OldLoops;
1690   if (ParentL)
1691     OldLoops.insert_range(*ParentL);
1692   else
1693     OldLoops.insert_range(AR.LI);
1694
// Cache the name now: it is needed for markLoopAsDeleted after L may be gone.
1695   std::string LoopName = std::string(L.getName());
1696
1697   bool Changed =
1698       tryToUnrollLoop(&L, AR.DT, &AR.LI, AR.SE, AR.TTI, AR.AC, ORE,
1699                       /*BFI*/ nullptr, /*PSI*/ nullptr,
1700                       /*PreserveLCSSA*/ true, OptLevel, /*OnlyFullUnroll*/ true,
1701                       OnlyWhenForced, ForgetSCEV, /*Count*/ std::nullopt,
1702                       /*Threshold*/ std::nullopt, /*AllowPartial*/ false,
1703                       /*Runtime*/ false, /*UpperBound*/ false,
1704                       /*AllowPeeling*/ true,
1705                       /*AllowProfileBasedPeeling*/ false,
1706                       /*FullUnrollMaxCount*/ std::nullopt) !=
1708   if (!Changed)
1709     return PreservedAnalyses::all();
1710
1711   // The parent must not be damaged by unrolling!
1712 #ifndef NDEBUG
1713   if (ParentL)
1714     ParentL->verifyLoop();
1715 #endif
1716
1717   // Unrolling can do several things to introduce new loops into a loop nest:
1718   //  - Full unrolling clones child loops within the current loop but then
1719   //    removes the current loop making all of the children appear to be new
1720   //    sibling loops.
1721   //
1722   // When a new loop appears as a sibling loop after fully unrolling,
1723   // its nesting structure has fundamentally changed and we want to revisit
1724   // it to reflect that.
1725   //
1726   // When unrolling has removed the current loop, we need to tell the
1727   // infrastructure that it is gone.
1728   //
1729   // Finally, we support a debugging/testing mode where we revisit child loops
1730   // as well. These are not expected to require further optimizations as either
1731   // they or the loop they were cloned from have been directly visited already.
1732   // But the debugging mode allows us to check this assumption.
1733   bool IsCurrentLoopValid = false;
1734   SmallVector<Loop *, 4> SibLoops;
1735   if (ParentL)
1736     SibLoops.append(ParentL->begin(), ParentL->end());
1737   else
1738     SibLoops.append(AR.LI.begin(), AR.LI.end());
// Keep only loops that did not exist before unrolling; seeing L itself in
// the sibling list proves the current loop survived.
1739   erase_if(SibLoops, [&](Loop *SibLoop) {
1740     if (SibLoop == &L) {
1741       IsCurrentLoopValid = true;
1742       return true;
1743     }
1744
1745     // Otherwise erase the loop from the list if it was in the old loops.
1746     return OldLoops.contains(SibLoop);
1747   });
1748   Updater.addSiblingLoops(SibLoops);
1749
1750   if (!IsCurrentLoopValid) {
1751     Updater.markLoopAsDeleted(L, LoopName);
1752   } else {
1753     // We can only walk child loops if the current loop remained valid.
1755       // Walk *all* of the child loops.
1756       SmallVector<Loop *, 4> ChildLoops(L.begin(), L.end());
1757       Updater.addChildLoops(ChildLoops);
1758     }
1759   }
1760
1762 }
1763
// New-PM function-level unroll pass body: simplify + LCSSA-form all loops,
// then walk a worklist of all loops (back to front, i.e. forward across the
// CFG) and call tryToUnrollLoop() on each with the options baked into
// UnrollOpts at construction time.
// NOTE(review): doxygen extraction; the signature's first lines, the ORE/AA
// analysis fetches, the Worklist declaration, and the final PreservedAnalyses
// return line are missing from this listing.
1766   auto &LI = AM.getResult<LoopAnalysis>(F);
1767   // There are no loops in the function. Return before computing other expensive
1768   // analyses.
1769   if (LI.empty())
1770     return PreservedAnalyses::all();
1771   auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
1772   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1773   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1774   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1777
// Uniformity info is only computed for targets with divergent branches.
1778   UniformityInfo *UI = TTI.hasBranchDivergence(&F)
1780                            : nullptr;
1781
1782   LoopAnalysisManager *LAM = nullptr;
1783   if (auto *LAMProxy = AM.getCachedResult<LoopAnalysisManagerFunctionProxy>(F))
1784     LAM = &LAMProxy->getManager();
1785
1786   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
1787   ProfileSummaryInfo *PSI =
1788       MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
// BFI is only worth computing when a profile summary exists.
1789   auto *BFI = (PSI && PSI->hasProfileSummary()) ?
1790       &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
1791
1792   bool Changed = false;
1793
1794   // The unroller requires loops to be in simplified form, and also needs LCSSA.
1795   // Since simplification may add new inner loops, it has to run before the
1796   // legality and profitability checks. This means running the loop unroller
1797   // will simplify all loops, regardless of whether anything end up being
1798   // unrolled.
1799   for (const auto &L : LI) {
1800     Changed |=
1801         simplifyLoop(L, &DT, &LI, &SE, &AC, nullptr, false /* PreserveLCSSA */);
1802     Changed |= formLCSSARecursively(*L, DT, &LI, &SE);
1803   }
1804
1805   // Add the loop nests in the reverse order of LoopInfo. See method
1806   // declaration.
1808   appendLoopsToWorklist(LI, Worklist);
1809
1810   while (!Worklist.empty()) {
1811     // Because the LoopInfo stores the loops in RPO, we walk the worklist
1812     // from back to front so that we work forward across the CFG, which
1813     // for unrolling is only needed to get optimization remarks emitted in
1814     // a forward order.
1815     Loop &L = *Worklist.pop_back_val();
1816 #ifndef NDEBUG
1817     Loop *ParentL = L.getParentLoop();
1818 #endif
1819
1820     // Check if the profile summary indicates that the profiled application
1821     // has a huge working set size, in which case we disable peeling to avoid
1822     // bloating it further.
1823     std::optional<bool> LocalAllowPeeling = UnrollOpts.AllowPeeling;
1824     if (PSI && PSI->hasHugeWorkingSetSize())
1825       LocalAllowPeeling = false;
// Cache the name before the loop may be deleted by full unrolling.
1826     std::string LoopName = std::string(L.getName());
1827     // The API here is quite complex to call and we allow to select some
1828     // flavors of unrolling during construction time (by setting UnrollOpts).
1830         &L, DT, &LI, SE, TTI, AC, ORE, BFI, PSI,
1831         /*PreserveLCSSA*/ true, UnrollOpts.OptLevel, /*OnlyFullUnroll*/ false,
1832         UnrollOpts.OnlyWhenForced, UnrollOpts.ForgetSCEV,
1833         /*Count*/ std::nullopt,
1834         /*Threshold*/ std::nullopt, UnrollOpts.AllowPartial,
1835         UnrollOpts.AllowRuntime, UnrollOpts.AllowUpperBound, LocalAllowPeeling,
1836         UnrollOpts.AllowProfileBasedPeeling, UnrollOpts.FullUnrollMaxCount, UI,
1837         &AA);
1839
1840     // The parent must not be damaged by unrolling!
1841 #ifndef NDEBUG
1842     if (Result != LoopUnrollResult::Unmodified && ParentL)
1843       ParentL->verifyLoop();
1844 #endif
1845
1846     // Clear any cached analysis results for L if we removed it completely.
1847     if (LAM && Result == LoopUnrollResult::FullyUnrolled)
1848       LAM->clear(L, LoopName);
1849   }
1850
1851   if (!Changed)
1852     return PreservedAnalyses::all();
1853
1855 }
1856
// Print this pass's pipeline representation, i.e. the "<...>" option string
// (partial/peeling/runtime/upperbound/profile-peeling flags, optional
// full-unroll-max, and the O-level) appended to the base pass name.
// NOTE(review): doxygen extraction; the signature's first line
// is missing from this listing.
1858     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1859   static_cast<PassInfoMixin<LoopUnrollPass> *>(this)->printPipeline(
1860       OS, MapClassName2PassName);
1861   OS << '<';
// Only explicitly-provided options are printed; "no-" prefixes a disabled one.
1862   if (UnrollOpts.AllowPartial != std::nullopt)
1863     OS << (*UnrollOpts.AllowPartial ? "" : "no-") << "partial;";
1864   if (UnrollOpts.AllowPeeling != std::nullopt)
1865     OS << (*UnrollOpts.AllowPeeling ? "" : "no-") << "peeling;";
1866   if (UnrollOpts.AllowRuntime != std::nullopt)
1867     OS << (*UnrollOpts.AllowRuntime ? "" : "no-") << "runtime;";
1868   if (UnrollOpts.AllowUpperBound != std::nullopt)
1869     OS << (*UnrollOpts.AllowUpperBound ? "" : "no-") << "upperbound;";
1870   if (UnrollOpts.AllowProfileBasedPeeling != std::nullopt)
1871     OS << (*UnrollOpts.AllowProfileBasedPeeling ? "" : "no-")
1872        << "profile-peeling;";
1873   if (UnrollOpts.FullUnrollMaxCount != std::nullopt)
1874     OS << "full-unroll-max=" << UnrollOpts.FullUnrollMaxCount << ';';
1875   OS << 'O' << UnrollOpts.OptLevel;
1876   OS << '>';
1877 }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
#define DEBUG_TYPE
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This header provides classes for managing per-loop analyses.
This header provides classes for managing a pipeline of passes over loops in LLVM IR.
static cl::opt< unsigned > UnrollMaxCount("unroll-max-count", cl::Hidden, cl::desc("Set the max unroll count for partial and runtime unrolling, for" "testing purposes"))
static cl::opt< unsigned > UnrollCount("unroll-count", cl::Hidden, cl::desc("Use this unroll count for all loops including those with " "unroll_count pragma values, for testing purposes"))
static cl::opt< unsigned > UnrollThresholdDefault("unroll-threshold-default", cl::init(150), cl::Hidden, cl::desc("Default threshold (max size of unrolled " "loop), used in all but O3 optimizations"))
static cl::opt< unsigned > FlatLoopTripCountThreshold("flat-loop-tripcount-threshold", cl::init(5), cl::Hidden, cl::desc("If the runtime tripcount for the loop is lower than the " "threshold, the loop is considered as flat and will be less " "aggressively unrolled."))
static cl::opt< unsigned > UnrollOptSizeThreshold("unroll-optsize-threshold", cl::init(0), cl::Hidden, cl::desc("The cost threshold for loop unrolling when optimizing for " "size"))
static bool hasUnrollFullPragma(const Loop *L)
static bool isSCEVUniform(const SCEV *S, UniformityInfo &UI)
Returns true if the SCEV expression is uniform, i.e., all threads in a convergent execution agree on ...
static cl::opt< bool > UnrollUnrollRemainder("unroll-remainder", cl::Hidden, cl::desc("Allow the loop remainder to be unrolled."))
static unsigned unrollCountPragmaValue(const Loop *L)
static bool hasUnrollEnablePragma(const Loop *L)
static cl::opt< unsigned > PragmaUnrollThreshold("pragma-unroll-threshold", cl::init(16 *1024), cl::Hidden, cl::desc("Unrolled size limit for loops with unroll metadata " "(full, enable, or count)."))
static cl::opt< unsigned > UnrollFullMaxCount("unroll-full-max-count", cl::Hidden, cl::desc("Set the max unroll count for full unrolling, for testing purposes"))
static cl::opt< unsigned > UnrollMaxUpperBound("unroll-max-upperbound", cl::init(8), cl::Hidden, cl::desc("The max of trip count upper bound that is considered in unrolling"))
static std::optional< unsigned > shouldPragmaUnroll(Loop *L, const UnrollPragmaInfo &PInfo, const unsigned TripMultiple, const unsigned TripCount, unsigned MaxTripCount, const UnrollCostEstimator UCE, const TargetTransformInfo::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
static std::optional< unsigned > shouldFullUnroll(Loop *L, const TargetTransformInfo &TTI, DominatorTree &DT, ScalarEvolution &SE, const SmallPtrSetImpl< const Value * > &EphValues, const unsigned FullUnrollTripCount, const UnrollCostEstimator UCE, const TargetTransformInfo::UnrollingPreferences &UP)
static std::optional< EstimatedUnrollCost > analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, DominatorTree &DT, ScalarEvolution &SE, const SmallPtrSetImpl< const Value * > &EphValues, const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize, unsigned MaxIterationsCountToAnalyze)
Figure out if the loop is worth full unrolling.
static LoopUnrollResult tryToUnrollLoop(Loop *L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE, const TargetTransformInfo &TTI, AssumptionCache &AC, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, bool PreserveLCSSA, int OptLevel, bool OnlyFullUnroll, bool OnlyWhenForced, bool ForgetAllSCEV, std::optional< unsigned > ProvidedCount, std::optional< unsigned > ProvidedThreshold, std::optional< bool > ProvidedAllowPartial, std::optional< bool > ProvidedRuntime, std::optional< bool > ProvidedUpperBound, std::optional< bool > ProvidedAllowPeeling, std::optional< bool > ProvidedAllowProfileBasedPeeling, std::optional< unsigned > ProvidedFullUnrollMaxCount, UniformityInfo *UI=nullptr, AAResults *AA=nullptr)
static cl::opt< unsigned > UnrollPartialThreshold("unroll-partial-threshold", cl::Hidden, cl::desc("The cost threshold for partial loop unrolling"))
static cl::opt< bool > UnrollAllowRemainder("unroll-allow-remainder", cl::Hidden, cl::desc("Allow generation of a loop remainder (extra iterations) " "when unrolling a loop."))
static std::optional< unsigned > shouldPartialUnroll(const unsigned LoopSize, const unsigned TripCount, const UnrollCostEstimator UCE, const TargetTransformInfo::UnrollingPreferences &UP)
static cl::opt< unsigned > PragmaUnrollFullMaxIterations("pragma-unroll-full-max-iterations", cl::init(1 '000 '000), cl::Hidden, cl::desc("Maximum allowed iterations to unroll under pragma unroll full."))
static const unsigned NoThreshold
A magic value for use with the Threshold parameter to indicate that the loop unroll should be perform...
static cl::opt< bool > UnrollRevisitChildLoops("unroll-revisit-child-loops", cl::Hidden, cl::desc("Enqueue and re-visit child loops in the loop PM after unrolling. " "This shouldn't typically be needed as child loops (or their " "clones) were already visited."))
static cl::opt< unsigned > UnrollThreshold("unroll-threshold", cl::Hidden, cl::desc("The cost threshold for loop unrolling"))
static cl::opt< bool > UnrollRuntime("unroll-runtime", cl::Hidden, cl::desc("Unroll loops with run-time trip counts"))
static bool hasRuntimeUnrollDisablePragma(const Loop *L)
static unsigned getFullUnrollBoostingFactor(const EstimatedUnrollCost &Cost, unsigned MaxPercentThresholdBoost)
static cl::opt< unsigned > UnrollThresholdAggressive("unroll-threshold-aggressive", cl::init(300), cl::Hidden, cl::desc("Threshold (max size of unrolled loop) to use in aggressive (O3) " "optimizations"))
static cl::opt< unsigned > UnrollMaxIterationsCountToAnalyze("unroll-max-iteration-count-to-analyze", cl::init(10), cl::Hidden, cl::desc("Don't allow loop unrolling to simulate more than this number of " "iterations when checking full unroll profitability"))
static cl::opt< unsigned > UnrollMaxPercentThresholdBoost("unroll-max-percent-threshold-boost", cl::init(400), cl::Hidden, cl::desc("The maximum 'boost' (represented as a percentage >= 100) applied " "to the threshold when aggressively unrolling a loop due to the " "dynamic cost savings. If completely unrolling a loop will reduce " "the total runtime from X to Y, we boost the loop unroll " "threshold to DefaultThreshold*std::min(MaxPercentThresholdBoost, " "X/Y). This limit avoids excessive code bloat."))
static cl::opt< bool > UnrollAllowPartial("unroll-allow-partial", cl::Hidden, cl::desc("Allows loops to be partially unrolled until " "-unroll-threshold loop size is reached."))
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Trace Metrics
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
LoopAnalysisManager LAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
LLVM IR instance of the generic uniformity analysis.
Value * RHS
Value * LHS
A manager for alias analyses.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
AnalysisUsage & addRequired()
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional Branch instruction.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
This is an important base class in LLVM.
Definition Constant.h:43
A debug info location.
Definition DebugLoc.h:123
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
bool isUniform(ConstValueRefT V) const
Whether V is uniform/non-divergent.
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
void addChildLoops(ArrayRef< Loop * > NewChildLoops)
Loop passes should use this method to indicate they have added new child loops of the current loop.
void markLoopAsDeleted(Loop &L, llvm::StringRef Name)
Loop passes should use this method to indicate they have deleted a loop from the nest.
void addSiblingLoops(ArrayRef< Loop * > NewSibLoops)
Loop passes should use this method to indicate they have added new sibling loops to the current loop.
void markLoopAsDeleted(Loop &L)
Definition LoopPass.cpp:111
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:589
void verifyLoop() const
Verify loop structure.
iterator end() const
iterator begin() const
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
iterator end() const
iterator begin() const
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
void setLoopID(MDNode *LoopID) const
Set the llvm.loop loop id metadata for this loop.
Definition LoopInfo.cpp:547
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
bool empty() const
Determine if the PriorityWorklist is empty or not.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void clear()
Completely clear the SetVector.
Definition SetVector.h:267
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
value_type pop_back_val()
Definition SetVector.h:279
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
Analysis pass providing the TargetTransformInfo.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool hasBranchDivergence(const Function *F=nullptr) const
Return true if branch divergence exists.
TargetCostKind
The kind of cost model.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
Analysis pass which computes UniformityInfo.
Legacy analysis pass which computes a CycleInfo.
Produce an estimate of the unrolled cost of the specified loop.
Definition UnrollLoop.h:151
ConvergenceKind Convergence
Definition UnrollLoop.h:157
LLVM_ABI uint64_t getUnrolledLoopSize(const TargetTransformInfo::UnrollingPreferences &UP, unsigned CountOverwrite=0) const
Returns loop size estimation for unrolled loop, given the unrolling configuration specified by UP.
LLVM_ABI bool canUnroll(OptimizationRemarkEmitter *ORE=nullptr, const Loop *L=nullptr) const
Whether it is legal to unroll this loop.
LLVM_ABI UnrollCostEstimator(const Loop *L, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &EphValues, unsigned BEInsns, bool TripCountIsUniform=false)
uint64_t getRolledLoopSize() const
Definition UnrollLoop.h:173
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM Value Representation.
Definition Value.h:75
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
iterator find(const_arg_type_t< ValueT > V)
Definition DenseSet.h:167
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
Changed
Abstract Attribute helper functions.
Definition Attributor.h:165
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
GenericUniformityInfo< SSAContext > UniformityInfo
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
bool isEqual(const GCNRPTracker::LiveRegSet &S1, const GCNRPTracker::LiveRegSet &S2)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
LLVM_ABI std::optional< MDNode * > makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef< StringRef > FollowupAttrs, const char *InheritOptionsAttrsPrefix="", bool AlwaysNew=false)
Create a new loop identifier for a loop created from a loop transformation.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
LLVM_ABI char & LCSSAID
Definition LCSSA.cpp:526
LLVM_ABI void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, ArrayRef< BasicBlock * > Blocks, AAResults *AA=nullptr)
Perform some cleanup and simplifications on loops after unrolling.
LLVM_ABI Pass * createLoopUnrollPass(int OptLevel=2, bool OnlyWhenForced=false, bool ForgetAllSCEV=false, int Threshold=-1, int Count=-1, int AllowPartial=-1, int Runtime=-1, int UpperBound=-1, int AllowPeeling=-1)
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:2025
LLVM_ABI void initializeLoopUnrollPass(PassRegistry &)
TargetTransformInfo::PeelingPreferences gatherPeelingPreferences(Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI, std::optional< bool > UserAllowPeeling, std::optional< bool > UserAllowProfileBasedPeeling, bool UnrollingSpecficValues=false)
LLVM_ABI CallBase * getLoopConvergenceHeart(const Loop *TheLoop)
Find the convergence heart of the loop.
LLVM_ABI TransformationMode hasUnrollAndJamTransformation(const Loop *L)
cl::opt< bool > ForgetSCEVInLoopUnroll
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
void computePeelCount(Loop *L, unsigned LoopSize, TargetTransformInfo::PeelingPreferences &PP, unsigned TripCount, DominatorTree &DT, ScalarEvolution &SE, const TargetTransformInfo &TTI, AssumptionCache *AC=nullptr, unsigned Threshold=UINT_MAX)
Definition LoopPeel.cpp:752
LLVM_TEMPLATE_ABI void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI TransformationMode hasUnrollTransformation(const Loop *L)
LoopUnrollResult
Represents the result of a UnrollLoop invocation.
Definition UnrollLoop.h:58
@ PartiallyUnrolled
The loop was partially unrolled – we still have a loop, but with a smaller trip count.
Definition UnrollLoop.h:65
@ Unmodified
The loop was not modified.
Definition UnrollLoop.h:60
@ FullyUnrolled
The loop was fully unrolled into straight-line code.
Definition UnrollLoop.h:69
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
void peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, ScalarEvolution *SE, DominatorTree &DT, AssumptionCache *AC, bool PreserveLCSSA, ValueToValueMapTy &VMap)
VMap is the value-map that maps instructions from the original loop to instructions in the last peele...
const char *const LLVMLoopUnrollFollowupAll
Definition UnrollLoop.h:45
TargetTransformInfo TTI
TransformationMode
The mode sets how eager a transformation should be applied.
Definition LoopUtils.h:283
@ TM_ForcedByUser
The transformation was directed by the user, e.g.
Definition LoopUtils.h:300
@ TM_Disable
The transformation should not be applied.
Definition LoopUtils.h:292
@ TM_Enable
The transformation should be applied without considering a cost model.
Definition LoopUtils.h:289
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition STLExtras.h:2011
LLVM_ABI MDNode * getUnrollMetadataForLoop(const Loop *L, StringRef Name)
DWARFExpression::Operation Op
LLVM_ABI TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, llvm::OptimizationRemarkEmitter &ORE, int OptLevel, std::optional< unsigned > UserThreshold, std::optional< unsigned > UserCount, std::optional< bool > UserAllowPartial, std::optional< bool > UserRuntime, std::optional< bool > UserUpperBound, std::optional< unsigned > UserFullUnrollMaxCount)
Gather the various unrolling parameters based on the defaults, compiler flags, TTI overrides and user...
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
const char *const LLVMLoopUnrollFollowupRemainder
Definition UnrollLoop.h:48
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
const char *const LLVMLoopUnrollFollowupUnrolled
Definition UnrollLoop.h:46
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2191
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI LoopUnrollResult UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const llvm::TargetTransformInfo *TTI, OptimizationRemarkEmitter *ORE, bool PreserveLCSSA, Loop **RemainderLoop=nullptr, AAResults *AA=nullptr)
Unroll the given loop by Count.
LLVM_ABI void computeUnrollCount(Loop *L, const TargetTransformInfo &TTI, DominatorTree &DT, LoopInfo *LI, AssumptionCache *AC, ScalarEvolution &SE, const SmallPtrSetImpl< const Value * > &EphValues, OptimizationRemarkEmitter *ORE, unsigned TripCount, unsigned MaxTripCount, bool MaxOrZero, unsigned TripMultiple, const UnrollCostEstimator &UCE, TargetTransformInfo::UnrollingPreferences &UP, TargetTransformInfo::PeelingPreferences &PP)
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
Utility to calculate the size and a few similar metrics for a set of basic blocks.
Definition CodeMetrics.h:34
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
bool PeelLast
Peel off the last PeelCount loop iterations.
bool PeelProfiledIterations
Allow peeling basing on profile.
unsigned PeelCount
A forced peeling factor (the number of bodied of the original loop that should be peeled off before t...
Parameters that control the generic loop unrolling transformation.
unsigned Count
A forced unrolling factor (the number of concatenated bodies of the original loop in the unrolled loo...
bool UpperBound
Allow using trip count upper bound to unroll loops.
unsigned Threshold
The cost threshold for the unrolled loop.
bool Force
Apply loop unroll on any kind of loop (mainly to loops that fail runtime unrolling).
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
unsigned DefaultUnrollRuntimeCount
Default unroll count for loops with run-time trip count.
unsigned MaxPercentThresholdBoost
If complete unrolling will reduce the cost of the loop, we will boost the Threshold by a certain perc...
bool RuntimeUnrollMultiExit
Allow runtime unrolling multi-exit loops.
unsigned SCEVExpansionBudget
Don't allow runtime unrolling if expanding the trip count takes more than SCEVExpansionBudget.
bool AddAdditionalAccumulators
Allow unrolling to add parallel reduction phis.
unsigned UnrollAndJamInnerLoopThreshold
Threshold for unroll and jam, for inner loop size.
unsigned MaxIterationsCountToAnalyze
Don't allow loop unrolling to simulate more than this number of iterations when checking full unroll ...
bool AllowRemainder
Allow generation of a loop remainder (extra iterations after unroll).
bool UnrollAndJam
Allow unroll and jam. Used to enable unroll and jam for the target.
bool UnrollRemainder
Allow unrolling of all the iterations of the runtime loop remainder.
unsigned FullUnrollMaxCount
Set the maximum unrolling factor for full unrolling.
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
bool AllowExpensiveTripCount
Allow emitting expensive instructions (such as divisions) when computing the trip count of a loop for...
unsigned MaxUpperBound
Set the maximum upper bound of trip count.
const Instruction * Heart
Definition UnrollLoop.h:79
const bool PragmaFullUnroll
Definition UnrollLoop.h:131
UnrollPragmaInfo(const Loop *L)
const unsigned PragmaCount
Definition UnrollLoop.h:132
const bool ExplicitUnroll
Definition UnrollLoop.h:135
const bool PragmaRuntimeUnrollDisable
Definition UnrollLoop.h:134
const bool UserUnrollCount
Definition UnrollLoop.h:130
const bool PragmaEnableUnroll
Definition UnrollLoop.h:133