LoopUnroll.cpp
1//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements some loop unrolling utilities. It does not define any
10// actual pass or policy, but provides a single function to perform loop
11// unrolling.
12//
13// The process of unrolling can produce extraneous basic blocks linked with
14// unconditional branches. This will be corrected in the future.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/Twine.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/CFG.h"
38#include "llvm/IR/Constants.h"
40#include "llvm/IR/DebugLoc.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/IRBuilder.h"
45#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Metadata.h"
50#include "llvm/IR/Use.h"
51#include "llvm/IR/User.h"
52#include "llvm/IR/ValueHandle.h"
53#include "llvm/IR/ValueMap.h"
56#include "llvm/Support/Debug.h"
67#include <assert.h>
68#include <numeric>
69#include <vector>
70
71namespace llvm {
72class DataLayout;
73class Value;
74} // namespace llvm
75
76using namespace llvm;
77
78#define DEBUG_TYPE "loop-unroll"
79
80// TODO: Should these be here or in LoopUnroll?
81STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
82STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
83STATISTIC(NumUnrolledNotLatch, "Number of loops unrolled without a conditional "
84 "latch (completely or otherwise)");
85
86static cl::opt<bool>
87UnrollRuntimeEpilog("unroll-runtime-epilog", cl::init(false), cl::Hidden,
88 cl::desc("Allow runtime unrolled loops to be unrolled "
89 "with epilog instead of prolog."));
90
91static cl::opt<bool>
92UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden,
93 cl::desc("Verify domtree after unrolling"),
94#ifdef EXPENSIVE_CHECKS
95 cl::init(true)
96#else
97 cl::init(false)
98#endif
99 );
100
101static cl::opt<bool>
102UnrollVerifyLoopInfo("unroll-verify-loopinfo", cl::Hidden,
103 cl::desc("Verify loopinfo after unrolling"),
104#ifdef EXPENSIVE_CHECKS
105 cl::init(true)
106#else
107 cl::init(false)
108#endif
109 );
110
112 "unroll-add-parallel-reductions", cl::init(false), cl::Hidden,
113 cl::desc("Allow unrolling to add parallel reduction phis."));
114
115/// Check if unrolling created a situation where we need to insert phi nodes to
116/// preserve LCSSA form.
117/// \param Blocks is a vector of basic blocks representing unrolled loop.
118/// \param L is the outer loop.
119/// It's possible that some of the blocks are in L, and some are not. In this
120/// case, if there is a use outside L and the definition is inside L, we need to
121/// insert a phi-node, otherwise LCSSA will be broken.
122/// The function is just a helper function for llvm::UnrollLoop that returns
123/// true if this situation occurs, indicating that LCSSA needs to be fixed.
124static bool needToInsertPhisForLCSSA(Loop *L,
 125 const std::vector<BasicBlock *> &Blocks,
126 LoopInfo *LI) {
127 for (BasicBlock *BB : Blocks) {
128 if (LI->getLoopFor(BB) == L)
129 continue;
130 for (Instruction &I : *BB) {
131 for (Use &U : I.operands()) {
132 if (const auto *Def = dyn_cast<Instruction>(U)) {
133 Loop *DefLoop = LI->getLoopFor(Def->getParent());
134 if (!DefLoop)
135 continue;
136 if (DefLoop->contains(L))
137 return true;
138 }
139 }
140 }
141 }
142 return false;
143}
144
145/// Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary
146/// and adds a mapping from the original loop to the new loop to NewLoops.
147/// Returns nullptr if no new loop was created, and otherwise a pointer to the
148/// original loop that OriginalBB was part of.
149const Loop *llvm::addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
 150 BasicBlock *ClonedBB, LoopInfo *LI,
151 NewLoopsMap &NewLoops) {
152 // Figure out which loop New is in.
153 const Loop *OldLoop = LI->getLoopFor(OriginalBB);
154 assert(OldLoop && "Should (at least) be in the loop being unrolled!");
155
156 Loop *&NewLoop = NewLoops[OldLoop];
157 if (!NewLoop) {
158 // Found a new sub-loop.
159 assert(OriginalBB == OldLoop->getHeader() &&
160 "Header should be first in RPO");
161
162 NewLoop = LI->AllocateLoop();
163 Loop *NewLoopParent = NewLoops.lookup(OldLoop->getParentLoop());
164
165 if (NewLoopParent)
166 NewLoopParent->addChildLoop(NewLoop);
167 else
168 LI->addTopLevelLoop(NewLoop);
169
170 NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
171 return OldLoop;
172 } else {
173 NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
174 return nullptr;
175 }
176}
177
178/// The function chooses which type of unroll (epilog or prolog) is more
179/// profitable.
180/// Epilog unroll is more profitable when there is a PHI that starts from a
181/// constant: in this case the epilog keeps the PHI starting from the constant,
182/// whereas the prolog converts it to a non-constant.
183///
184/// loop:
185/// PN = PHI [I, Latch], [CI, PreHeader]
186/// I = foo(PN)
187/// ...
188///
189/// Epilog unroll case.
190/// loop:
191/// PN = PHI [I2, Latch], [CI, PreHeader]
192/// I1 = foo(PN)
193/// I2 = foo(I1)
194/// ...
195/// Prolog unroll case.
196/// NewPN = PHI [PrologI, Prolog], [CI, PreHeader]
197/// loop:
198/// PN = PHI [I2, Latch], [NewPN, PreHeader]
199/// I1 = foo(PN)
200/// I2 = foo(I1)
201/// ...
202///
203static bool isEpilogProfitable(Loop *L) {
204 BasicBlock *PreHeader = L->getLoopPreheader();
205 BasicBlock *Header = L->getHeader();
206 assert(PreHeader && Header);
207 for (const PHINode &PN : Header->phis()) {
208 if (isa<ConstantInt>(PN.getIncomingValueForBlock(PreHeader)))
209 return true;
210 }
211 return false;
212}
213
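// A load whose value may be reused later in the loop, recorded together with
// the memory generation in which it was seen (see loadCSE below).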
214struct LoadValue {
215 Instruction *DefI = nullptr;
216 unsigned Generation = 0;
217 LoadValue() = default;
 218 LoadValue(Instruction *Inst, unsigned Generation)
 219 : DefI(Inst), Generation(Generation) {}
220};
221
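// A dominator-tree DFS stack entry used by loadCSE. Constructing it opens a
// ScopedHashTable scope, so loads recorded in this subtree are discarded once
// the subtree has been processed.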
222class StackNode {
 223 ScopedHashTable<const SCEV *, LoadValue>::ScopeTy LoadScope;
 224 unsigned CurrentGeneration;
 225 unsigned ChildGeneration;
 226 DomTreeNode *Node;
 227 DomTreeNode::const_iterator ChildIter;
 228 DomTreeNode::const_iterator EndIter;
 229 bool Processed = false;
 230
231public:
 232 StackNode(ScopedHashTable<const SCEV *, LoadValue> &AvailableLoads,
 233 unsigned cg, DomTreeNode *N, DomTreeNode::const_iterator Child,
 234 DomTreeNode::const_iterator End)
 235 : LoadScope(AvailableLoads), CurrentGeneration(cg), ChildGeneration(cg),
 236 Node(N), ChildIter(Child), EndIter(End) {}
237 // Accessors.
238 unsigned currentGeneration() const { return CurrentGeneration; }
239 unsigned childGeneration() const { return ChildGeneration; }
240 void childGeneration(unsigned generation) { ChildGeneration = generation; }
241 DomTreeNode *node() { return Node; }
242 DomTreeNode::const_iterator childIter() const { return ChildIter; }
243
 244 DomTreeNode *nextChild() {
 245 DomTreeNode *Child = *ChildIter;
246 ++ChildIter;
247 return Child;
248 }
249
250 DomTreeNode::const_iterator end() const { return EndIter; }
251 bool isProcessed() const { return Processed; }
252 void process() { Processed = true; }
253};
254
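/// Return the previously recorded load LV.DefI if it can replace LI: the types
/// must match, and if the memory generation has changed since LV was recorded,
/// MemorySSA must show that no clobbering access lies between the two loads.
/// Returns nullptr otherwise.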
255Value *getMatchingValue(LoadValue LV, LoadInst *LI, unsigned CurrentGeneration,
256 BatchAAResults &BAA,
257 function_ref<MemorySSA *()> GetMSSA) {
258 if (!LV.DefI)
259 return nullptr;
260 if (LV.DefI->getType() != LI->getType())
261 return nullptr;
262 if (LV.Generation != CurrentGeneration) {
263 MemorySSA *MSSA = GetMSSA();
264 if (!MSSA)
265 return nullptr;
266 auto *EarlierMA = MSSA->getMemoryAccess(LV.DefI);
267 MemoryAccess *LaterDef =
268 MSSA->getWalker()->getClobberingMemoryAccess(LI, BAA);
269 if (!MSSA->dominates(LaterDef, EarlierMA))
270 return nullptr;
271 }
272 return LV.DefI;
273}
274
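/// Simple CSE of simple (non-volatile, non-atomic) loads inside the loop body:
/// walk the dominator tree from the header, key available loads by the SCEV of
/// their pointer operand, and replace a load with an earlier equivalent one
/// when that is safe. This catches loads made redundant by unrolling.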
275void loadCSE(Loop *L, DominatorTree &DT, ScalarEvolution &SE, LoopInfo &LI,
 276 BatchAAResults &BAA, function_ref<MemorySSA *()> GetMSSA) {
 277 ScopedHashTable<const SCEV *, LoadValue> AvailableLoads;
 278 SmallVector<std::unique_ptr<StackNode>> NodesToProcess;
 279 DomTreeNode *HeaderD = DT.getNode(L->getHeader());
280 NodesToProcess.emplace_back(new StackNode(AvailableLoads, 0, HeaderD,
281 HeaderD->begin(), HeaderD->end()));
282
283 unsigned CurrentGeneration = 0;
284 while (!NodesToProcess.empty()) {
285 StackNode *NodeToProcess = &*NodesToProcess.back();
286
287 CurrentGeneration = NodeToProcess->currentGeneration();
288
289 if (!NodeToProcess->isProcessed()) {
290 // Process the node.
291
 292 // If this block has a single predecessor, then the predecessor is the parent
294 // of the domtree node and all of the live out memory values are still
295 // current in this block. If this block has multiple predecessors, then
296 // they could have invalidated the live-out memory values of our parent
297 // value. For now, just be conservative and invalidate memory if this
298 // block has multiple predecessors.
299 if (!NodeToProcess->node()->getBlock()->getSinglePredecessor())
300 ++CurrentGeneration;
301 for (auto &I : make_early_inc_range(*NodeToProcess->node()->getBlock())) {
302
303 auto *Load = dyn_cast<LoadInst>(&I);
304 if (!Load || !Load->isSimple()) {
305 if (I.mayWriteToMemory())
306 CurrentGeneration++;
307 continue;
308 }
309
310 const SCEV *PtrSCEV = SE.getSCEV(Load->getPointerOperand());
311 LoadValue LV = AvailableLoads.lookup(PtrSCEV);
312 if (Value *M =
313 getMatchingValue(LV, Load, CurrentGeneration, BAA, GetMSSA)) {
314 if (LI.replacementPreservesLCSSAForm(Load, M)) {
315 Load->replaceAllUsesWith(M);
316 Load->eraseFromParent();
317 }
318 } else {
319 AvailableLoads.insert(PtrSCEV, LoadValue(Load, CurrentGeneration));
320 }
321 }
322 NodeToProcess->childGeneration(CurrentGeneration);
323 NodeToProcess->process();
324 } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
325 // Push the next child onto the stack.
326 DomTreeNode *Child = NodeToProcess->nextChild();
327 if (!L->contains(Child->getBlock()))
328 continue;
329 NodesToProcess.emplace_back(
330 new StackNode(AvailableLoads, NodeToProcess->childGeneration(), Child,
331 Child->begin(), Child->end()));
332 } else {
333 // It has been processed, and there are no more children to process,
334 // so delete it and pop it off the stack.
335 NodesToProcess.pop_back();
336 }
337 }
338}
339
340/// Perform some cleanup and simplifications on loops after unrolling. It is
341/// useful to simplify the IVs in the new loop, as well as do a quick
342/// simplify/dce pass of the instructions.
343void llvm::simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI,
 344 ScalarEvolution *SE, DominatorTree *DT,
 345 AssumptionCache *AC,
 346 const TargetTransformInfo *TTI,
 347 AAResults *AA) {
348 using namespace llvm::PatternMatch;
349
350 // Simplify any new induction variables in the partially unrolled loop.
351 if (SE && SimplifyIVs) {
 352 SmallVector<WeakTrackingVH, 16> DeadInsts;
 353 simplifyLoopIVs(L, SE, DT, LI, TTI, DeadInsts);
354
355 // Aggressively clean up dead instructions that simplifyLoopIVs already
356 // identified. Any remaining should be cleaned up below.
357 while (!DeadInsts.empty()) {
358 Value *V = DeadInsts.pop_back_val();
 359 if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
 360 RecursivelyDeleteTriviallyDeadInstructions(Inst);
 361 }
362
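// Additionally CSE loads across the now-adjacent unrolled iterations.
// MemorySSA is built lazily, and only if a generation mismatch forces a
// clobber/dominance query.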
363 if (AA) {
364 std::unique_ptr<MemorySSA> MSSA = nullptr;
365 BatchAAResults BAA(*AA);
366 loadCSE(L, *DT, *SE, *LI, BAA, [L, AA, DT, &MSSA]() -> MemorySSA * {
367 if (!MSSA)
368 MSSA.reset(new MemorySSA(*L, AA, DT));
369 return &*MSSA;
370 });
371 }
372 }
373
374 // At this point, the code is well formed. Perform constprop, instsimplify,
375 // and dce.
 376 const DataLayout &DL = L->getHeader()->getDataLayout();
 377 SmallVector<WeakTrackingVH, 16> DeadInsts;
378 for (BasicBlock *BB : L->getBlocks()) {
379 // Remove repeated debug instructions after loop unrolling.
380 if (BB->getParent()->getSubprogram())
 381 RemoveRedundantDbgInstrs(BB);
 382
383 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
384 if (Value *V = simplifyInstruction(&Inst, {DL, nullptr, DT, AC}))
385 if (LI->replacementPreservesLCSSAForm(&Inst, V))
386 Inst.replaceAllUsesWith(V);
 387 if (isInstructionTriviallyDead(&Inst))
 388 DeadInsts.emplace_back(&Inst);
389
390 // Fold ((add X, C1), C2) to (add X, C1+C2). This is very common in
391 // unrolled loops, and handling this early allows following code to
392 // identify the IV as a "simple recurrence" without first folding away
393 // a long chain of adds.
394 {
395 Value *X;
396 const APInt *C1, *C2;
397 if (match(&Inst, m_Add(m_Add(m_Value(X), m_APInt(C1)), m_APInt(C2)))) {
398 auto *InnerI = dyn_cast<Instruction>(Inst.getOperand(0));
399 auto *InnerOBO = cast<OverflowingBinaryOperator>(Inst.getOperand(0));
400 bool SignedOverflow;
401 APInt NewC = C1->sadd_ov(*C2, SignedOverflow);
402 Inst.setOperand(0, X);
403 Inst.setOperand(1, ConstantInt::get(Inst.getType(), NewC));
404 Inst.setHasNoUnsignedWrap(Inst.hasNoUnsignedWrap() &&
405 InnerOBO->hasNoUnsignedWrap());
406 Inst.setHasNoSignedWrap(Inst.hasNoSignedWrap() &&
407 InnerOBO->hasNoSignedWrap() &&
408 !SignedOverflow);
409 if (InnerI && isInstructionTriviallyDead(InnerI))
410 DeadInsts.emplace_back(InnerI);
411 }
412 }
413 }
414 // We can't do recursive deletion until we're done iterating, as we might
415 // have a phi which (potentially indirectly) uses instructions later in
416 // the block we're iterating through.
 417 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
 418 }
419}
420
421// Loops containing convergent instructions that are uncontrolled or controlled
422// from outside the loop must have a count that divides their TripMultiple.
424static LLVM_ATTRIBUTE_USED bool canHaveUnrollRemainder(const Loop *L) {
 425 if (getLoopConvergenceHeart(L))
 426 return false;
427
428 // Check for uncontrolled convergent operations.
429 for (auto &BB : L->blocks()) {
430 for (auto &I : *BB) {
 431 if (isa<ConvergenceControlInst>(I))
 432 return true;
433 if (auto *CB = dyn_cast<CallBase>(&I))
434 if (CB->isConvergent())
435 return CB->getConvergenceControlToken();
436 }
437 }
438 return true;
439}
440
441/// Unroll the given loop by Count. The loop must be in LCSSA form. Unrolling
442/// can only fail when the loop's latch block is not terminated by a conditional
443/// branch instruction. However, if the trip count (and multiple) are not known,
444/// loop unrolling will mostly produce more code that is no faster.
445///
446/// If Runtime is true then UnrollLoop will try to insert a prologue or
447/// epilogue that ensures the latch has a trip multiple of Count. UnrollLoop
448/// will not runtime-unroll the loop if computing the run-time trip count will
449/// be expensive and AllowExpensiveTripCount is false.
450///
451/// The LoopInfo Analysis that is passed will be kept consistent.
452///
453/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
454/// DominatorTree if they are non-null.
455///
456/// If RemainderLoop is non-null, it will receive the remainder loop (if
457/// required and not fully unrolled).
458LoopUnrollResult
459llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
 460 ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
 461 const TargetTransformInfo *TTI, OptimizationRemarkEmitter *ORE,
 462 bool PreserveLCSSA, Loop **RemainderLoop, AAResults *AA) {
463 assert(DT && "DomTree is required");
464
465 if (!L->getLoopPreheader()) {
466 LLVM_DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
 467 return LoopUnrollResult::Unmodified;
 468 }
469
470 if (!L->getLoopLatch()) {
471 LLVM_DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
 472 return LoopUnrollResult::Unmodified;
 473 }
474
475 // Loops with indirectbr cannot be cloned.
476 if (!L->isSafeToClone()) {
477 LLVM_DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
 478 return LoopUnrollResult::Unmodified;
 479 }
480
481 if (L->getHeader()->hasAddressTaken()) {
482 // The loop-rotate pass can be helpful to avoid this in many cases.
484 dbgs() << " Won't unroll loop: address of header block is taken.\n");
486 }
487
488 assert(ULO.Count > 0);
489
490 // All these values should be taken only after peeling because they might have
491 // changed.
492 BasicBlock *Preheader = L->getLoopPreheader();
493 BasicBlock *Header = L->getHeader();
494 BasicBlock *LatchBlock = L->getLoopLatch();
 495 SmallVector<BasicBlock *, 8> ExitBlocks;
 496 L->getExitBlocks(ExitBlocks);
497 std::vector<BasicBlock *> OriginalLoopBlocks = L->getBlocks();
498
499 const unsigned MaxTripCount = SE->getSmallConstantMaxTripCount(L);
500 const bool MaxOrZero = SE->isBackedgeTakenCountMaxOrZero(L);
501 std::optional<unsigned> OriginalTripCount =
503 BranchProbability OriginalLoopProb = llvm::getLoopProbability(L);
504
505 // Effectively "DCE" unrolled iterations that are beyond the max tripcount
506 // and will never be executed.
507 if (MaxTripCount && ULO.Count > MaxTripCount)
508 ULO.Count = MaxTripCount;
509
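// For every exiting block, remember the per-exit trip information that is
// later used to fold unrolled copies of the exit branch whose direction is
// statically known.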
510 struct ExitInfo {
511 unsigned TripCount;
512 unsigned TripMultiple;
513 unsigned BreakoutTrip;
514 bool ExitOnTrue;
515 BasicBlock *FirstExitingBlock = nullptr;
516 SmallVector<BasicBlock *> ExitingBlocks;
517 };
519 SmallVector<BasicBlock *, 4> ExitingBlocks;
520 L->getExitingBlocks(ExitingBlocks);
521 for (auto *ExitingBlock : ExitingBlocks) {
522 // The folding code is not prepared to deal with non-branch instructions
523 // right now.
524 auto *BI = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
525 if (!BI)
526 continue;
527
528 ExitInfo &Info = ExitInfos[ExitingBlock];
529 Info.TripCount = SE->getSmallConstantTripCount(L, ExitingBlock);
530 Info.TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock);
531 if (Info.TripCount != 0) {
532 Info.BreakoutTrip = Info.TripCount % ULO.Count;
533 Info.TripMultiple = 0;
534 } else {
535 Info.BreakoutTrip = Info.TripMultiple =
536 (unsigned)std::gcd(ULO.Count, Info.TripMultiple);
537 }
538 Info.ExitOnTrue = !L->contains(BI->getSuccessor(0));
539 Info.ExitingBlocks.push_back(ExitingBlock);
540 LLVM_DEBUG(dbgs() << " Exiting block %" << ExitingBlock->getName()
541 << ": TripCount=" << Info.TripCount
542 << ", TripMultiple=" << Info.TripMultiple
543 << ", BreakoutTrip=" << Info.BreakoutTrip << "\n");
544 }
545
546 // Are we eliminating the loop control altogether? Note that we can know
547 // we're eliminating the backedge without knowing exactly which iteration
548 // of the unrolled body exits.
549 const bool CompletelyUnroll = ULO.Count == MaxTripCount;
550
551 const bool PreserveOnlyFirst = CompletelyUnroll && MaxOrZero;
552
553 // There's no point in performing runtime unrolling if this unroll count
554 // results in a full unroll.
555 if (CompletelyUnroll)
556 ULO.Runtime = false;
557
558 // Go through all exits of L and see if there are any phi-nodes there. We just
559 // conservatively assume that they're inserted to preserve LCSSA form, which
560 // means that complete unrolling might break this form. We need to either fix
561 // it in-place after the transformation, or entirely rebuild LCSSA. TODO: For
562 // now we just recompute LCSSA for the outer loop, but it should be possible
563 // to fix it in-place.
564 bool NeedToFixLCSSA =
565 PreserveLCSSA && CompletelyUnroll &&
566 any_of(ExitBlocks,
567 [](const BasicBlock *BB) { return isa<PHINode>(BB->begin()); });
568
569 // The current loop unroll pass can unroll loops that have
570 // (1) single latch; and
571 // (2a) latch is unconditional; or
572 // (2b) latch is conditional and is an exiting block
573 // FIXME: The implementation can be extended to work with more complicated
574 // cases, e.g. loops with multiple latches.
575 BranchInst *LatchBI = dyn_cast<BranchInst>(LatchBlock->getTerminator());
576
577 // A conditional branch which exits the loop, which can be optimized to an
578 // unconditional branch in the unrolled loop in some cases.
579 bool LatchIsExiting = L->isLoopExiting(LatchBlock);
580 if (!LatchBI || (LatchBI->isConditional() && !LatchIsExiting)) {
582 dbgs() << "Can't unroll; a conditional latch must exit the loop");
584 }
585
587 "Can't runtime unroll if loop contains a convergent operation.");
588
589 bool EpilogProfitability =
590 UnrollRuntimeEpilog.getNumOccurrences() ? UnrollRuntimeEpilog
 591 : isEpilogProfitable(L);
 592
593 if (ULO.Runtime &&
 594 !UnrollRuntimeLoopRemainder(
 595 L, ULO.Count, ULO.AllowExpensiveTripCount, EpilogProfitability,
596 ULO.UnrollRemainder, ULO.ForgetAllSCEV, LI, SE, DT, AC, TTI,
597 PreserveLCSSA, ULO.SCEVExpansionBudget, ULO.RuntimeUnrollMultiExit,
598 RemainderLoop, OriginalTripCount, OriginalLoopProb)) {
599 if (ULO.Force)
600 ULO.Runtime = false;
601 else {
602 LLVM_DEBUG(dbgs() << "Won't unroll; remainder loop could not be "
603 "generated when assuming runtime trip count\n");
 604 return LoopUnrollResult::Unmodified;
 605 }
606 }
607
608 using namespace ore;
609 // Report the unrolling decision.
610 if (CompletelyUnroll) {
611 LLVM_DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
612 << " with trip count " << ULO.Count << "!\n");
613 if (ORE)
614 ORE->emit([&]() {
615 return OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
616 L->getHeader())
617 << "completely unrolled loop with "
618 << NV("UnrollCount", ULO.Count) << " iterations";
619 });
620 } else {
621 LLVM_DEBUG(dbgs() << "UNROLLING loop %" << Header->getName() << " by "
622 << ULO.Count);
623 if (ULO.Runtime)
624 LLVM_DEBUG(dbgs() << " with run-time trip count");
625 LLVM_DEBUG(dbgs() << "!\n");
626
627 if (ORE)
628 ORE->emit([&]() {
629 OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
630 L->getHeader());
631 Diag << "unrolled loop by a factor of " << NV("UnrollCount", ULO.Count);
632 if (ULO.Runtime)
633 Diag << " with run-time trip count";
634 return Diag;
635 });
636 }
637
638 // We are going to make changes to this loop. SCEV may be keeping cached info
639 // about it, in particular about backedge taken count. The changes we make
640 // are guaranteed to invalidate this information for our loop. It is tempting
 641 // to only invalidate the loop being unrolled, but that is incorrect because
 642 // exiting branches from inner loops can affect the outer loops: if something
 643 // changes inside them, then any of the outer loops may change as well. When
 644 // we forget the outermost loop, we also forget all contained loops, which is
 645 // what we need here.
646 if (SE) {
647 if (ULO.ForgetAllSCEV)
648 SE->forgetAllLoops();
649 else {
650 SE->forgetTopmostLoop(L);
 651 SE->forgetBlockAndLoopDispositions();
 652 }
653 }
654
655 if (!LatchIsExiting)
656 ++NumUnrolledNotLatch;
657
658 // For the first iteration of the loop, we should use the precloned values for
659 // PHI nodes. Insert associations now.
660 ValueToValueMapTy LastValueMap;
661 std::vector<PHINode*> OrigPHINode;
662 for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
663 OrigPHINode.push_back(cast<PHINode>(I));
664 }
665
666 // Collect phi nodes for reductions for which we can introduce multiple
667 // parallel reduction phis and compute the final reduction result after the
668 // loop. This requires a single exit block after unrolling. This is ensured by
669 // restricting to single-block loops where the unrolled iterations are known
670 // to not exit.
 671 DenseMap<PHINode *, RecurrenceDescriptor> Reductions;
 672 bool CanAddAdditionalAccumulators =
673 (UnrollAddParallelReductions.getNumOccurrences() > 0
676 !CompletelyUnroll && L->getNumBlocks() == 1 &&
677 (ULO.Runtime ||
678 (ExitInfos.contains(Header) && ((ExitInfos[Header].TripCount != 0 &&
679 ExitInfos[Header].BreakoutTrip == 0))));
680
681 // Limit parallelizing reductions to unroll counts of 4 or less for now.
682 // TODO: The number of parallel reductions should depend on the number of
683 // execution units. We also don't have to add a parallel reduction phi per
684 // unrolled iteration, but could for example add a parallel phi for every 2
685 // unrolled iterations.
686 if (CanAddAdditionalAccumulators && ULO.Count <= 4) {
687 for (PHINode &Phi : Header->phis()) {
688 auto RdxDesc = canParallelizeReductionWhenUnrolling(Phi, L, SE);
689 if (!RdxDesc)
690 continue;
691
692 // Only handle duplicate phis for a single reduction for now.
693 // TODO: Handle any number of reductions
694 if (!Reductions.empty())
695 continue;
696
697 Reductions[&Phi] = *RdxDesc;
698 }
699 }
700
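// Record the header and latch of each unrolled copy so that, once cloning is
// finished, every latch can be redirected to the header of the next copy.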
701 std::vector<BasicBlock *> Headers;
702 std::vector<BasicBlock *> Latches;
703 Headers.push_back(Header);
704 Latches.push_back(LatchBlock);
705
706 // The current on-the-fly SSA update requires blocks to be processed in
707 // reverse postorder so that LastValueMap contains the correct value at each
708 // exit.
709 LoopBlocksDFS DFS(L);
710 DFS.perform(LI);
711
712 // Stash the DFS iterators before adding blocks to the loop.
713 LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
714 LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();
715
716 std::vector<BasicBlock*> UnrolledLoopBlocks = L->getBlocks();
717
718 // Loop Unrolling might create new loops. While we do preserve LoopInfo, we
719 // might break loop-simplified form for these loops (as they, e.g., would
720 // share the same exit blocks). We'll keep track of loops for which we can
721 // break this so that later we can re-simplify them.
722 SmallSetVector<Loop *, 4> LoopsToSimplify;
723 LoopsToSimplify.insert_range(*L);
724
725 // When a FSDiscriminator is enabled, we don't need to add the multiply
726 // factors to the discriminators.
727 if (Header->getParent()->shouldEmitDebugInfoForProfiling() &&
 728 !EnableFSDiscriminator)
 729 for (BasicBlock *BB : L->getBlocks())
730 for (Instruction &I : *BB)
731 if (!I.isDebugOrPseudoInst())
732 if (const DILocation *DIL = I.getDebugLoc()) {
733 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(ULO.Count);
734 if (NewDIL)
735 I.setDebugLoc(*NewDIL);
736 else
738 << "Failed to create new discriminator: "
739 << DIL->getFilename() << " Line: " << DIL->getLine());
740 }
741
742 // Identify what noalias metadata is inside the loop: if it is inside the
743 // loop, the associated metadata must be cloned for each iteration.
744 SmallVector<MDNode *, 6> LoopLocalNoAliasDeclScopes;
745 identifyNoAliasScopesToClone(L->getBlocks(), LoopLocalNoAliasDeclScopes);
746
747 // We place the unrolled iterations immediately after the original loop
748 // latch. This is a reasonable default placement if we don't have block
749 // frequencies, and if we do, well the layout will be adjusted later.
750 auto BlockInsertPt = std::next(LatchBlock->getIterator());
751 SmallVector<Instruction *> PartialReductions;
752 for (unsigned It = 1; It != ULO.Count; ++It) {
 753 SmallVector<BasicBlock *, 8> NewBlocks;
 754 SmallDenseMap<const Loop *, Loop *, 4> NewLoops;
 755 NewLoops[L] = L;
756
757 for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
 758 ValueToValueMapTy VMap;
 759 BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
760 Header->getParent()->insert(BlockInsertPt, New);
761
762 assert((*BB != Header || LI->getLoopFor(*BB) == L) &&
763 "Header should not be in a sub-loop");
764 // Tell LI about New.
765 const Loop *OldLoop = addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);
766 if (OldLoop)
767 LoopsToSimplify.insert(NewLoops[OldLoop]);
768
769 if (*BB == Header) {
770 // Loop over all of the PHI nodes in the block, changing them to use
771 // the incoming values from the previous block.
772 for (PHINode *OrigPHI : OrigPHINode) {
773 PHINode *NewPHI = cast<PHINode>(VMap[OrigPHI]);
774 Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
775
776 // Use cloned phis as parallel phis for partial reductions, which will
777 // get combined to the final reduction result after the loop.
778 if (Reductions.contains(OrigPHI)) {
779 // Collect partial reduction results.
780 if (PartialReductions.empty())
781 PartialReductions.push_back(cast<Instruction>(InVal));
782 PartialReductions.push_back(cast<Instruction>(VMap[InVal]));
783
784 // Update the start value for the cloned phis to use the identity
785 // value for the reduction.
786 const RecurrenceDescriptor &RdxDesc = Reductions[OrigPHI];
 787 NewPHI->setIncomingValueForBlock(
 788 L->getLoopPreheader(),
 789 getRecurrenceIdentity(RdxDesc.getRecurrenceKind(),
 790 OrigPHI->getType(),
791 RdxDesc.getFastMathFlags()));
792
793 // Update NewPHI to use the cloned value for the iteration and move
794 // to header.
795 NewPHI->replaceUsesOfWith(InVal, VMap[InVal]);
796 NewPHI->moveBefore(OrigPHI->getIterator());
797 continue;
798 }
799
800 if (Instruction *InValI = dyn_cast<Instruction>(InVal))
801 if (It > 1 && L->contains(InValI))
802 InVal = LastValueMap[InValI];
803 VMap[OrigPHI] = InVal;
804 NewPHI->eraseFromParent();
805 }
806
807 // Eliminate copies of the loop heart intrinsic, if any.
808 if (ULO.Heart) {
809 auto it = VMap.find(ULO.Heart);
810 assert(it != VMap.end());
811 Instruction *heartCopy = cast<Instruction>(it->second);
812 heartCopy->eraseFromParent();
813 VMap.erase(it);
814 }
815 }
816
817 // Remap source location atom instance. Do this now, rather than
818 // when we remap instructions, because remap is called once we've
819 // cloned all blocks (all the clones would get the same atom
820 // number).
821 if (!VMap.AtomMap.empty())
822 for (Instruction &I : *New)
823 RemapSourceAtom(&I, VMap);
824
825 // Update our running map of newest clones
826 LastValueMap[*BB] = New;
827 for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
828 VI != VE; ++VI)
829 LastValueMap[VI->first] = VI->second;
830
831 // Add phi entries for newly created values to all exit blocks.
832 for (BasicBlock *Succ : successors(*BB)) {
833 if (L->contains(Succ))
834 continue;
835 for (PHINode &PHI : Succ->phis()) {
836 Value *Incoming = PHI.getIncomingValueForBlock(*BB);
837 ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
838 if (It != LastValueMap.end())
839 Incoming = It->second;
840 PHI.addIncoming(Incoming, New);
 841 SE->forgetLcssaPhiWithNewPredecessor(L, &PHI);
 842 }
843 }
844 // Keep track of new headers and latches as we create them, so that
845 // we can insert the proper branches later.
846 if (*BB == Header)
847 Headers.push_back(New);
848 if (*BB == LatchBlock)
849 Latches.push_back(New);
850
851 // Keep track of the exiting block and its successor block contained in
852 // the loop for the current iteration.
853 auto ExitInfoIt = ExitInfos.find(*BB);
854 if (ExitInfoIt != ExitInfos.end())
855 ExitInfoIt->second.ExitingBlocks.push_back(New);
856
857 NewBlocks.push_back(New);
858 UnrolledLoopBlocks.push_back(New);
859
860 // Update DomTree: since we just copy the loop body, and each copy has a
861 // dedicated entry block (copy of the header block), this header's copy
862 // dominates all copied blocks. That means, dominance relations in the
863 // copied body are the same as in the original body.
864 if (*BB == Header)
865 DT->addNewBlock(New, Latches[It - 1]);
866 else {
867 auto BBDomNode = DT->getNode(*BB);
868 auto BBIDom = BBDomNode->getIDom();
869 BasicBlock *OriginalBBIDom = BBIDom->getBlock();
870 DT->addNewBlock(
871 New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
872 }
873 }
874
875 // Remap all instructions in the most recent iteration.
876 // Key Instructions: Nothing to do - we've already remapped the atoms.
877 remapInstructionsInBlocks(NewBlocks, LastValueMap);
878 for (BasicBlock *NewBlock : NewBlocks)
879 for (Instruction &I : *NewBlock)
880 if (auto *II = dyn_cast<AssumeInst>(&I))
 881 AC->registerAssumption(II);
 882
883 {
884 // Identify what other metadata depends on the cloned version. After
885 // cloning, replace the metadata with the corrected version for both
886 // memory instructions and noalias intrinsics.
887 std::string ext = (Twine("It") + Twine(It)).str();
888 cloneAndAdaptNoAliasScopes(LoopLocalNoAliasDeclScopes, NewBlocks,
889 Header->getContext(), ext);
890 }
891 }
892
893 // Loop over the PHI nodes in the original block, setting incoming values.
894 for (PHINode *PN : OrigPHINode) {
895 if (CompletelyUnroll) {
896 PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
897 PN->eraseFromParent();
898 } else if (ULO.Count > 1) {
899 if (Reductions.contains(PN))
900 continue;
901
902 Value *InVal = PN->removeIncomingValue(LatchBlock, false);
903 // If this value was defined in the loop, take the value defined by the
904 // last iteration of the loop.
905 if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
906 if (L->contains(InValI))
907 InVal = LastValueMap[InVal];
908 }
909 assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
910 PN->addIncoming(InVal, Latches.back());
911 }
912 }
913
914 // Connect latches of the unrolled iterations to the headers of the next
915 // iteration. Currently they point to the header of the same iteration.
916 for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
917 unsigned j = (i + 1) % e;
918 Latches[i]->getTerminator()->replaceSuccessorWith(Headers[i], Headers[j]);
919 }
920
921 // Update dominators of blocks we might reach through exits.
922 // Immediate dominator of such block might change, because we add more
923 // routes which can lead to the exit: we can now reach it from the copied
924 // iterations too.
925 if (ULO.Count > 1) {
926 for (auto *BB : OriginalLoopBlocks) {
927 auto *BBDomNode = DT->getNode(BB);
928 SmallVector<BasicBlock *, 16> ChildrenToUpdate;
929 for (auto *ChildDomNode : BBDomNode->children()) {
930 auto *ChildBB = ChildDomNode->getBlock();
931 if (!L->contains(ChildBB))
932 ChildrenToUpdate.push_back(ChildBB);
933 }
934 // The new idom of the block will be the nearest common dominator
935 // of all copies of the previous idom. This is equivalent to the
936 // nearest common dominator of the previous idom and the first latch,
937 // which dominates all copies of the previous idom.
938 BasicBlock *NewIDom = DT->findNearestCommonDominator(BB, LatchBlock);
939 for (auto *ChildBB : ChildrenToUpdate)
940 DT->changeImmediateDominator(ChildBB, NewIDom);
941 }
942 }
943
 944 assert(!UnrollVerifyDomtree ||
 945 DT->verify(DominatorTree::VerificationLevel::Fast));
946
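// SetDest rewrites a conditional exit branch whose direction is statically
// known into an unconditional branch to the surviving successor and queues the
// corresponding dominator tree edge deletion.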
 947 SmallVector<DominatorTree::UpdateType> DTUpdates;
 948 auto SetDest = [&](BasicBlock *Src, bool WillExit, bool ExitOnTrue) {
949 auto *Term = cast<BranchInst>(Src->getTerminator());
950 const unsigned Idx = ExitOnTrue ^ WillExit;
951 BasicBlock *Dest = Term->getSuccessor(Idx);
952 BasicBlock *DeadSucc = Term->getSuccessor(1-Idx);
953
954 // Remove predecessors from all non-Dest successors.
955 DeadSucc->removePredecessor(Src, /* KeepOneInputPHIs */ true);
956
957 // Replace the conditional branch with an unconditional one.
958 auto *BI = BranchInst::Create(Dest, Term->getIterator());
959 BI->setDebugLoc(Term->getDebugLoc());
960 Term->eraseFromParent();
961
962 DTUpdates.emplace_back(DominatorTree::Delete, Src, DeadSucc);
963 };
964
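// For copy i of an exiting block (whose in-loop successor belongs to copy j),
// decide whether the exit is statically known to be taken (true), known not to
// be taken (false), or must remain conditional (std::nullopt).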
965 auto WillExit = [&](const ExitInfo &Info, unsigned i, unsigned j,
966 bool IsLatch) -> std::optional<bool> {
967 if (CompletelyUnroll) {
968 if (PreserveOnlyFirst) {
969 if (i == 0)
970 return std::nullopt;
971 return j == 0;
972 }
973 // Complete (but possibly inexact) unrolling
974 if (j == 0)
975 return true;
976 if (Info.TripCount && j != Info.TripCount)
977 return false;
978 return std::nullopt;
979 }
980
981 if (ULO.Runtime) {
982 // If runtime unrolling inserts a prologue, information about non-latch
983 // exits may be stale.
984 if (IsLatch && j != 0)
985 return false;
986 return std::nullopt;
987 }
988
989 if (j != Info.BreakoutTrip &&
990 (Info.TripMultiple == 0 || j % Info.TripMultiple != 0)) {
991 // If we know the trip count or a multiple of it, we can safely use an
992 // unconditional branch for some iterations.
993 return false;
994 }
995 return std::nullopt;
996 };
997
998 // Fold branches for iterations where we know that they will exit or not
999 // exit.
1000 for (auto &Pair : ExitInfos) {
1001 ExitInfo &Info = Pair.second;
1002 for (unsigned i = 0, e = Info.ExitingBlocks.size(); i != e; ++i) {
1003 // The branch destination.
1004 unsigned j = (i + 1) % e;
1005 bool IsLatch = Pair.first == LatchBlock;
1006 std::optional<bool> KnownWillExit = WillExit(Info, i, j, IsLatch);
1007 if (!KnownWillExit) {
1008 if (!Info.FirstExitingBlock)
1009 Info.FirstExitingBlock = Info.ExitingBlocks[i];
1010 continue;
1011 }
1012
1013 // We don't fold known-exiting branches for non-latch exits here,
1014 // because this ensures that both all loop blocks and all exit blocks
1015 // remain reachable in the CFG.
1016 // TODO: We could fold these branches, but it would require much more
1017 // sophisticated updates to LoopInfo.
1018 if (*KnownWillExit && !IsLatch) {
1019 if (!Info.FirstExitingBlock)
1020 Info.FirstExitingBlock = Info.ExitingBlocks[i];
1021 continue;
1022 }
1023
1024 SetDest(Info.ExitingBlocks[i], *KnownWillExit, Info.ExitOnTrue);
1025 }
1026 }
1027
1028 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1029 DomTreeUpdater *DTUToUse = &DTU;
1030 if (ExitingBlocks.size() == 1 && ExitInfos.size() == 1) {
1031 // Manually update the DT if there's a single exiting node. In that case
1032 // there's a single exit node and it is sufficient to update the nodes
1033 // immediately dominated by the original exiting block. They will become
1034 // dominated by the first exiting block that leaves the loop after
1035 // unrolling. Note that the CFG inside the loop does not change, so there's
1036 // no need to update the DT inside the unrolled loop.
1037 DTUToUse = nullptr;
1038 auto &[OriginalExit, Info] = *ExitInfos.begin();
1039 if (!Info.FirstExitingBlock)
1040 Info.FirstExitingBlock = Info.ExitingBlocks.back();
1041 for (auto *C : to_vector(DT->getNode(OriginalExit)->children())) {
1042 if (L->contains(C->getBlock()))
1043 continue;
1044 C->setIDom(DT->getNode(Info.FirstExitingBlock));
1045 }
1046 } else {
1047 DTU.applyUpdates(DTUpdates);
1048 }
1049
1050 // When completely unrolling, the last latch becomes unreachable.
1051 if (!LatchIsExiting && CompletelyUnroll) {
1052 // There is no need to update the DT here, because there must be a unique
1053 // latch. Hence if the latch is not exiting it must directly branch back to
1054 // the original loop header and does not dominate any nodes.
1055 assert(LatchBlock->getSingleSuccessor() && "Loop with multiple latches?");
1056 changeToUnreachable(Latches.back()->getTerminator(), PreserveLCSSA);
1057 }
1058
1059 // Merge adjacent basic blocks, if possible.
1060 for (BasicBlock *Latch : Latches) {
1061 BranchInst *Term = dyn_cast<BranchInst>(Latch->getTerminator());
1062 assert((Term ||
1063 (CompletelyUnroll && !LatchIsExiting && Latch == Latches.back())) &&
1064 "Need a branch as terminator, except when fully unrolling with "
1065 "unconditional latch");
1066 if (Term && Term->isUnconditional()) {
1067 BasicBlock *Dest = Term->getSuccessor(0);
1068 BasicBlock *Fold = Dest->getUniquePredecessor();
1069 if (MergeBlockIntoPredecessor(Dest, /*DTU=*/DTUToUse, LI,
1070 /*MSSAU=*/nullptr, /*MemDep=*/nullptr,
1071 /*PredecessorWithTwoSuccessors=*/false,
1072 DTUToUse ? nullptr : DT)) {
1073 // Dest has been folded into Fold. Update our worklists accordingly.
1074 llvm::replace(Latches, Dest, Fold);
1075 llvm::erase(UnrolledLoopBlocks, Dest);
1076 }
1077 }
1078 }
1079
1080 // If there are partial reductions, create code in the exit block to compute
1081 // the final result and update users of the final result.
1082 if (!PartialReductions.empty()) {
1083 BasicBlock *ExitBlock = L->getExitBlock();
1084 assert(ExitBlock &&
1085 "Can only introduce parallel reduction phis with single exit block");
1086 assert(Reductions.size() == 1 &&
1087 "currently only a single reduction is supported");
1088 Value *FinalRdxValue = PartialReductions.back();
1089 Value *RdxResult = nullptr;
1090 for (PHINode &Phi : ExitBlock->phis()) {
1091 if (Phi.getIncomingValueForBlock(L->getLoopLatch()) != FinalRdxValue)
1092 continue;
1093 if (!RdxResult) {
1094 RdxResult = PartialReductions.front();
1095 IRBuilder Builder(ExitBlock, ExitBlock->getFirstNonPHIIt());
1096 RecurKind RK = Reductions.begin()->second.getRecurrenceKind();
1097 for (Instruction *RdxPart : drop_begin(PartialReductions)) {
1098 RdxResult = Builder.CreateBinOp(
 1099 (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(RK),
 1100 RdxPart, RdxResult, "bin.rdx");
1101 }
1102 NeedToFixLCSSA = true;
1103 for (Instruction *RdxPart : PartialReductions)
1104 RdxPart->dropPoisonGeneratingFlags();
1105 }
1106
1107 Phi.replaceAllUsesWith(RdxResult);
1108 }
1109 }
1110
1111 if (DTUToUse) {
1112 // Apply updates to the DomTree.
1113 DT = &DTU.getDomTree();
1114 }
 1115 assert(!UnrollVerifyDomtree ||
 1116 DT->verify(DominatorTree::VerificationLevel::Fast));
1117
1118 // At this point, the code is well formed. We now simplify the unrolled loop,
1119 // doing constant propagation and dead code elimination as we go.
1120 simplifyLoopAfterUnroll(L, !CompletelyUnroll && ULO.Count > 1, LI, SE, DT, AC,
1121 TTI, AA);
1122
1123 NumCompletelyUnrolled += CompletelyUnroll;
1124 ++NumUnrolled;
1125
1126 Loop *OuterL = L->getParentLoop();
1127 // Update LoopInfo if the loop is completely removed.
1128 if (CompletelyUnroll) {
1129 LI->erase(L);
1130 // We shouldn't try to use `L` anymore.
1131 L = nullptr;
1132 } else {
1133 // Update metadata for the loop's branch weights and estimated trip count:
1134 // - If ULO.Runtime, UnrollRuntimeLoopRemainder sets the guard branch
1135 // weights, latch branch weights, and estimated trip count of the
1136 // remainder loop it creates. It also sets the branch weights for the
1137 // unrolled loop guard it creates. The branch weights for the unrolled
1138 // loop latch are adjusted below. FIXME: Handle prologue loops.
1139 // - Otherwise, if unrolled loop iteration latches become unconditional,
1140 // branch weights are adjusted above. FIXME: Actually handle such
1141 // unconditional latches.
1142 // - Otherwise, the original loop's branch weights are correct for the
1143 // unrolled loop, so do not adjust them.
1144 // - In all cases, the unrolled loop's estimated trip count is set below.
1145 //
1146 // As an example of the last case, consider what happens if the unroll count
1147 // is 4 for a loop with an estimated trip count of 10 when we do not create
1148 // a remainder loop and all iterations' latches remain conditional. Each
1149 // unrolled iteration's latch still has the same probability of exiting the
1150 // loop as it did when in the original loop, and thus it should still have
1151 // the same branch weights. Each unrolled iteration's non-zero probability
1152 // of exiting already appropriately reduces the probability of reaching the
1153 // remaining iterations just as it did in the original loop. Trying to also
1154 // adjust the branch weights of the final unrolled iteration's latch (i.e.,
1155 // the backedge for the unrolled loop as a whole) to reflect its new trip
1156 // count of 3 will erroneously further reduce its block frequencies.
1157 // However, in case an analysis later needs to estimate the trip count of
1158 // the unrolled loop as a whole without considering the branch weights for
1159 // each unrolled iteration's latch within it, we store the new trip count as
1160 // separate metadata.
1161 if (!OriginalLoopProb.isUnknown() && ULO.Runtime && EpilogProfitability) {
1162 // Where p is always the probability of executing at least 1 more
1163 // iteration, the probability for at least n more iterations is p^n.
1164 setLoopProbability(L, OriginalLoopProb.pow(ULO.Count));
1165 }
1166 if (OriginalTripCount) {
1167 unsigned NewTripCount = *OriginalTripCount / ULO.Count;
1168 if (!ULO.Runtime && *OriginalTripCount % ULO.Count)
1169 ++NewTripCount;
1170 setLoopEstimatedTripCount(L, NewTripCount);
1171 }
1172 }
1173
 1174 // LoopInfo should now be valid, confirm that.
 1175 if (UnrollVerifyLoopInfo)
 1176 LI->verify(*DT);
1177
1178 // After complete unrolling most of the blocks should be contained in OuterL.
1179 // However, some of them might happen to be out of OuterL (e.g. if they
1180 // precede a loop exit). In this case we might need to insert PHI nodes in
1181 // order to preserve LCSSA form.
1182 // We don't need to check this if we already know that we need to fix LCSSA
1183 // form.
1184 // TODO: For now we just recompute LCSSA for the outer loop in this case, but
1185 // it should be possible to fix it in-place.
1186 if (PreserveLCSSA && OuterL && CompletelyUnroll && !NeedToFixLCSSA)
1187 NeedToFixLCSSA |= ::needToInsertPhisForLCSSA(OuterL, UnrolledLoopBlocks, LI);
1188
1189 // Make sure that loop-simplify form is preserved. We want to simplify
1190 // at least one layer outside of the loop that was unrolled so that any
1191 // changes to the parent loop exposed by the unrolling are considered.
1192 if (OuterL) {
1193 // OuterL includes all loops for which we can break loop-simplify, so
1194 // it's sufficient to simplify only it (it'll recursively simplify inner
1195 // loops too).
1196 if (NeedToFixLCSSA) {
1197 // LCSSA must be performed on the outermost affected loop. The unrolled
1198 // loop's last loop latch is guaranteed to be in the outermost loop
1199 // after LoopInfo's been updated by LoopInfo::erase.
1200 Loop *LatchLoop = LI->getLoopFor(Latches.back());
1201 Loop *FixLCSSALoop = OuterL;
1202 if (!FixLCSSALoop->contains(LatchLoop))
1203 while (FixLCSSALoop->getParentLoop() != LatchLoop)
1204 FixLCSSALoop = FixLCSSALoop->getParentLoop();
1205
1206 formLCSSARecursively(*FixLCSSALoop, *DT, LI, SE);
1207 } else if (PreserveLCSSA) {
1208 assert(OuterL->isLCSSAForm(*DT) &&
1209 "Loops should be in LCSSA form after loop-unroll.");
1210 }
1211
1212 // TODO: That potentially might be compile-time expensive. We should try
1213 // to fix the loop-simplified form incrementally.
1214 simplifyLoop(OuterL, DT, LI, SE, AC, nullptr, PreserveLCSSA);
1215 } else {
1216 // Simplify loops for which we might've broken loop-simplify form.
1217 for (Loop *SubLoop : LoopsToSimplify)
1218 simplifyLoop(SubLoop, DT, LI, SE, AC, nullptr, PreserveLCSSA);
1219 }
1220
1221 return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled
 1222 : LoopUnrollResult::PartiallyUnrolled;
1223}
1224
1225/// Given an llvm.loop loop id metadata node, returns the loop hint metadata
1226/// node with the given name (for example, "llvm.loop.unroll.count"). If no
1227/// such metadata node exists, then nullptr is returned.
1228MDNode *llvm::GetUnrollMetadata(MDNode *LoopID, StringRef Name) {
 1229 // First operand should refer to the loop id itself.
1230 assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
1231 assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
1232
1233 for (const MDOperand &MDO : llvm::drop_begin(LoopID->operands())) {
1234 MDNode *MD = dyn_cast<MDNode>(MDO);
1235 if (!MD)
1236 continue;
1237
 1238 MDString *S = dyn_cast<MDString>(MD->getOperand(0));
 1239 if (!S)
1240 continue;
1241
1242 if (Name == S->getString())
1243 return MD;
1244 }
1245 return nullptr;
1246}
1247
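/// Check whether Phi is an integer reduction that UnrollLoop can split into
/// parallel partial-reduction phis (one per unrolled copy) which are then
/// recombined after the loop; returns its RecurrenceDescriptor if so.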
1248std::optional<RecurrenceDescriptor>
1249llvm::canParallelizeReductionWhenUnrolling(PHINode &Phi, Loop *L,
 1250 ScalarEvolution *SE) {
1251 RecurrenceDescriptor RdxDesc;
1252 if (!RecurrenceDescriptor::isReductionPHI(&Phi, L, RdxDesc,
1253 /*DemandedBits=*/nullptr,
1254 /*AC=*/nullptr, /*DT=*/nullptr, SE))
1255 return std::nullopt;
1256 RecurKind RK = RdxDesc.getRecurrenceKind();
1257 // Skip unsupported reductions.
1258 // TODO: Handle additional reductions, including FP and min-max
1259 // reductions.
 1260 if (!RecurrenceDescriptor::isIntegerRecurrenceKind(RK) ||
 1261 RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) ||
 1262 RecurrenceDescriptor::isFindIVRecurrenceKind(RK) ||
 1263 RecurrenceDescriptor::isMinMaxRecurrenceKind(RK))
 1264 return std::nullopt;
1265
1266 if (RdxDesc.IntermediateStore)
1267 return std::nullopt;
1268
1269 // Don't unroll reductions with constant ops; those can be folded to a
1270 // single induction update.
1271 if (any_of(cast<Instruction>(Phi.getIncomingValueForBlock(L->getLoopLatch()))
1272 ->operands(),
 1273 IsaPred<Constant>))
 1274 return std::nullopt;
1275
1276 BasicBlock *Latch = L->getLoopLatch();
1277 if (!Latch ||
1278 !is_contained(
1279 cast<Instruction>(Phi.getIncomingValueForBlock(Latch))->operands(),
1280 &Phi))
1281 return std::nullopt;
1282
1283 return RdxDesc;
1284}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Optimize for code generation
#define LLVM_ATTRIBUTE_USED
Definition Compiler.h:236
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
early cse Early CSE w MemorySSA
#define DEBUG_TYPE
This file defines a set of templates that efficiently compute a dominator tree over a generic graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
static bool needToInsertPhisForLCSSA(Loop *L, const std::vector< BasicBlock * > &Blocks, LoopInfo *LI)
Check if unrolling created a situation where we need to insert phi nodes to preserve LCSSA form.
static bool isEpilogProfitable(Loop *L)
The function chooses which type of unroll (epilog or prolog) is more profitabale.
void loadCSE(Loop *L, DominatorTree &DT, ScalarEvolution &SE, LoopInfo &LI, BatchAAResults &BAA, function_ref< MemorySSA *()> GetMSSA)
Value * getMatchingValue(LoadValue LV, LoadInst *LI, unsigned CurrentGeneration, BatchAAResults &BAA, function_ref< MemorySSA *()> GetMSSA)
static cl::opt< bool > UnrollRuntimeEpilog("unroll-runtime-epilog", cl::init(false), cl::Hidden, cl::desc("Allow runtime unrolled loops to be unrolled " "with epilog instead of prolog."))
static cl::opt< bool > UnrollVerifyLoopInfo("unroll-verify-loopinfo", cl::Hidden, cl::desc("Verify loopinfo after unrolling"), cl::init(false))
static cl::opt< bool > UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden, cl::desc("Verify domtree after unrolling"), cl::init(false))
static LLVM_ATTRIBUTE_USED bool canHaveUnrollRemainder(const Loop *L)
static cl::opt< bool > UnrollAddParallelReductions("unroll-add-parallel-reductions", cl::init(false), cl::Hidden, cl::desc("Allow unrolling to add parallel reduction phis."))
#define I(x, y, z)
Definition MD5.cpp:57
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
void childGeneration(unsigned generation)
bool isProcessed() const
unsigned currentGeneration() const
unsigned childGeneration() const
StackNode(ScopedHashTable< const SCEV *, LoadValue > &AvailableLoads, unsigned cg, DomTreeNode *N, DomTreeNode::const_iterator Child, DomTreeNode::const_iterator End)
DomTreeNode::const_iterator end() const
void process()
DomTreeNode * nextChild()
DomTreeNode::const_iterator childIter() const
DomTreeNode * node()
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BranchProbability pow(unsigned N) const
Compute pow(Probability, N).
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
unsigned size() const
Definition DenseMap.h:110
iterator begin()
Definition DenseMap.h:78
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
iterator_range< iterator > children()
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
typename SmallVector< DomTreeNodeBase *, 4 >::const_iterator const_iterator
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI Instruction * findNearestCommonDominator(Instruction *I1, Instruction *I2) const
Find the nearest instruction I that dominates both I1 and I2, in the sense that a result produced bef...
DomTreeT & getDomTree()
Flush DomTree updates and return DomTree.
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
An instruction for reading from memory.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
void addChildLoop(LoopT *NewChild)
Add the specified loop to be a child of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
std::vector< BasicBlock * >::const_reverse_iterator RPOIterator
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
void addTopLevelLoop(LoopT *New)
This adds the specified loop to the collection of top-level loops.
LoopT * AllocateLoop(ArgsTy &&...Args)
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
bool replacementPreservesLCSSAForm(Instruction *From, Value *To)
Returns true if replacing From with To everywhere is guaranteed to preserve LCSSA form.
Definition LoopInfo.h:441
LLVM_ABI void erase(Loop *L)
Update LoopInfo after removing the last backedge from a loop.
Definition LoopInfo.cpp:887
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool isLCSSAForm(const DominatorTree &DT, bool IgnoreTokens=true) const
Return true if the Loop is in LCSSA form.
Definition LoopInfo.cpp:463
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
Tracking metadata reference owned by Metadata.
Definition Metadata.h:900
A single uniqued string.
Definition Metadata.h:721
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition MemorySSA.h:1053
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition MemorySSA.h:702
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
LLVM_ABI MemorySSAWalker * getWalker()
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition MemorySSA.h:720
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
static LLVM_ABI bool isReductionPHI(PHINode *Phi, Loop *TheLoop, RecurrenceDescriptor &RedDes, DemandedBits *DB=nullptr, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr, ScalarEvolution *SE=nullptr)
Returns true if Phi is a reduction in TheLoop.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
RecurKind getRecurrenceKind() const
StoreInst * IntermediateStore
Reductions may store a temporary or final result to an invariant address.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static LLVM_ABI bool isIntegerRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is an integer kind.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
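A hedged sketch of identifying reduction PHIs with the RecurrenceDescriptor entry points above; L, AC, DT, and SE are the analyses assumed to be held by the caller.
for (PHINode &Phi : L->getHeader()->phis()) {
  RecurrenceDescriptor RedDes;
  if (RecurrenceDescriptor::isReductionPHI(&Phi, L, RedDes, /*DB=*/nullptr, AC, DT, SE)) {
    RecurKind Kind = RedDes.getRecurrenceKind();
    bool IsMinMax = RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind);
    (void)IsMinMax; // ... decide how (or whether) to handle this reduction ...
  }
}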
This class represents an analyzed expression in the program.
The main scalar evolution driver.
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI void forgetAllLoops()
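A hedged sketch of the ScalarEvolution trip-count queries above; Loop::getExitingBlock and ScalarEvolution::getExitCount are standard APIs assumed available even though they are not listed here.
unsigned TripCount = SE->getSmallConstantTripCount(L);       // exact count, or 0 if unknown
unsigned MaxTripCount = SE->getSmallConstantMaxTripCount(L); // upper bound, or 0 if unknown
unsigned TripMultiple = 1;
if (BasicBlock *ExitingBB = L->getExitingBlock()) {
  const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
  TripMultiple = SE->getSmallConstantTripMultiple(L, ExitCount);
}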
void insert(const K &Key, const V &Val)
V lookup(const K &Key) const
ScopedHashTableScope< K, V, KInfo, AllocatorTy > ScopeTy
ScopeTy - A type alias for easy access to the name of the scope for this hash table.
void insert_range(Range &&R)
Definition SetVector.h:174
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:149
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:337
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:24
iterator find(const KeyT &Val)
Definition ValueMap.h:160
iterator begin()
Definition ValueMap.h:138
iterator end()
Definition ValueMap.h:139
ValueMapIteratorImpl< MapT, const Value *, false > iterator
Definition ValueMap.h:135
bool erase(const KeyT &Val)
Definition ValueMap.h:192
DMAtomT AtomMap
Map {(InlinedAt, old atom number) -> new atom number}.
Definition ValueMap.h:123
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition ilist_node.h:123
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from the given basic block.
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
LLVM_ABI void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, AAResults *AA=nullptr)
Perform some cleanup and simplifications on loops after unrolling.
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:533
LLVM_ABI BasicBlock * CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap, const Twine &NameSuffix="", Function *F=nullptr, ClonedCodeInfo *CodeInfo=nullptr, bool MapAtoms=true)
Return a copy of the specified basic block, but without embedding the block into a particular functio...
LLVM_ABI std::optional< RecurrenceDescriptor > canParallelizeReductionWhenUnrolling(PHINode &Phi, Loop *L, ScalarEvolution *SE)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
SmallDenseMap< const Loop *, Loop *, 4 > NewLoopsMap
Definition UnrollLoop.h:41
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
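A hedged sketch of restoring LCSSA after a transform, combining isLCSSAForm (listed above) with this helper; DT, LI, and SE are the usual analyses held by the caller.
Loop *FixupL = L->getParentLoop() ? L->getParentLoop() : L;
if (!FixupL->isLCSSAForm(DT))
  formLCSSARecursively(*FixupL, DT, LI, SE);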
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
DomTreeNodeBase< BasicBlock > DomTreeNode
Definition Dominators.h:94
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
Definition STLExtras.h:2128
LLVM_ABI cl::opt< bool > EnableFSDiscriminator
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI CallBase * getLoopConvergenceHeart(const Loop *TheLoop)
Find the convergence heart of the loop.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT, LoopInfo *LI, const TargetTransformInfo *TTI, SmallVectorImpl< WeakTrackingVH > &Dead)
SimplifyLoopIVs - Simplify users of induction variables within this loop.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
BranchProbability getLoopProbability(Loop *L)
Based on branch weight metadata, return either:
LoopUnrollResult
Represents the result of a UnrollLoop invocation.
Definition UnrollLoop.h:58
@ PartiallyUnrolled
The loop was partially unrolled – we still have a loop, but with a smaller trip count.
Definition UnrollLoop.h:65
@ Unmodified
The loop was not modified.
Definition UnrollLoop.h:60
@ FullyUnrolled
The loop was fully unrolled into straight-line code.
Definition UnrollLoop.h:69
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2513
bool setLoopProbability(Loop *L, BranchProbability P)
Set branch weight metadata for the latch of L to indicate that, at the end of any iteration,...
TargetTransformInfo TTI
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
void replace(R &&Range, const T &OldValue, const T &NewValue)
Provide wrappers to std::replace which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1860
RecurKind
These are the kinds of recurrences that we support.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
LLVM_ABI void cloneAndAdaptNoAliasScopes(ArrayRef< MDNode * > NoAliasDeclScopes, ArrayRef< BasicBlock * > NewBlocks, LLVMContext &Context, StringRef Ext)
Clone the specified noalias decl scopes.
LLVM_ABI void remapInstructionsInBlocks(ArrayRef< BasicBlock * > Blocks, ValueToValueMapTy &VMap)
Remaps instructions in Blocks using the mapping in VMap.
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
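A hedged sketch of the clone-and-remap pattern these utilities support, together with CloneBasicBlock listed earlier; L and its parent function are assumed valid, and the ".copy" suffix is illustrative.
ValueToValueMapTy VMap;
SmallVector<BasicBlock *, 8> NewBlocks;
for (BasicBlock *BB : L->getBlocks()) {
  BasicBlock *New = CloneBasicBlock(BB, VMap, ".copy", BB->getParent());
  VMap[BB] = New; // record the block mapping itself
  NewBlocks.push_back(New);
}
// Rewrite operands of the cloned instructions to refer to the clones.
remapInstructionsInBlocks(NewBlocks, VMap);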
LLVM_ABI bool setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount, std::optional< unsigned > EstimatedLoopInvocationWeight=std::nullopt)
Set llvm.loop.estimated_trip_count with the value EstimatedTripCount in the loop metadata of L.
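A hedged sketch of updating the trip-count estimate after unrolling by Count; the rescaling shown is purely illustrative, not the exact policy the unroller applies.
if (std::optional<unsigned> EstTC = getLoopEstimatedTripCount(L))
  setLoopEstimatedTripCount(L, std::max(*EstTC / Count, 1u));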
LLVM_ABI const Loop * addClonedBlockToLoopInfo(BasicBlock *OriginalBB, BasicBlock *ClonedBB, LoopInfo *LI, NewLoopsMap &NewLoops)
Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary and adds a mapping from the o...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
LLVM_ABI void identifyNoAliasScopesToClone(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< MDNode * > &NoAliasDeclScopes)
Find the 'llvm.experimental.noalias.scope.decl' intrinsics in the specified basic blocks and extract ...
LLVM_ABI bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, bool AllowExpensiveTripCount, bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, bool PreserveLCSSA, unsigned SCEVExpansionBudget, bool RuntimeUnrollMultiExit, Loop **ResultLoop=nullptr, std::optional< unsigned > OriginalTripCount=std::nullopt, BranchProbability OriginalLoopProb=BranchProbability::getUnknown())
Insert code in the prolog/epilog code when unrolling a loop with a run-time trip-count.
LLVM_ABI MDNode * GetUnrollMetadata(MDNode *LoopID, StringRef Name)
Given an llvm.loop loop id metadata node, returns the loop hint metadata node with the given name (fo...
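A hedged sketch of reading an unroll pragma through this helper; mdconst::dyn_extract is a standard Metadata.h utility assumed here, and the two-operand layout is the usual form of the hint.
unsigned PragmaCount = 0;
if (MDNode *LoopID = L->getLoopID())
  if (MDNode *MD = GetUnrollMetadata(LoopID, "llvm.loop.unroll.count"))
    if (MD->getNumOperands() == 2)
      if (auto *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1)))
        PragmaCount = (unsigned)CI->getZExtValue();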
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI void RemapSourceAtom(Instruction *I, ValueToValueMapTy &VM)
Remap source location atom.
LLVM_ABI LoopUnrollResult UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const llvm::TargetTransformInfo *TTI, OptimizationRemarkEmitter *ORE, bool PreserveLCSSA, Loop **RemainderLoop=nullptr, AAResults *AA=nullptr)
Unroll the given loop by Count.
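A hedged sketch of driving UnrollLoop from a caller; the UnrollLoopOptions field names are assumptions based on UnrollLoop.h and may differ between LLVM versions.
UnrollLoopOptions ULO;
ULO.Count = 4;                      // requested unroll factor (assumed field name)
ULO.Force = false;                  // do not force past the usual safety checks (assumed)
ULO.Runtime = false;                // no runtime remainder loop (assumed)
ULO.AllowExpensiveTripCount = false;
ULO.UnrollRemainder = false;
ULO.ForgetAllSCEV = false;
Loop *RemainderLoop = nullptr;
LoopUnrollResult Result = UnrollLoop(L, ULO, LI, SE, DT, AC, TTI, ORE,
                                     /*PreserveLCSSA=*/true, &RemainderLoop);
if (Result == LoopUnrollResult::FullyUnrolled) {
  // The loop has been erased from LoopInfo; L must not be used any further.
}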
#define N
Instruction * DefI
LoadValue()=default
unsigned Generation
LoadValue(Instruction *Inst, unsigned Generation)
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
const Instruction * Heart
Definition UnrollLoop.h:79
std::conditional_t< IsConst, const ValueT &, ValueT & > second
Definition ValueMap.h:349