LLVM 23.0.0git
SimpleLoopUnswitch.cpp
Go to the documentation of this file.
1///===- SimpleLoopUnswitch.cpp - Hoist loop-invariant control flow ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "llvm/ADT/DenseMap.h"
11#include "llvm/ADT/STLExtras.h"
12#include "llvm/ADT/Sequence.h"
13#include "llvm/ADT/SetVector.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/ADT/Twine.h"
20#include "llvm/Analysis/CFG.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/IRBuilder.h"
39#include "llvm/IR/InstrTypes.h"
40#include "llvm/IR/Instruction.h"
43#include "llvm/IR/MDBuilder.h"
44#include "llvm/IR/Module.h"
47#include "llvm/IR/Use.h"
48#include "llvm/IR/Value.h"
51#include "llvm/Support/Debug.h"
62#include <algorithm>
63#include <cassert>
64#include <iterator>
65#include <numeric>
66#include <optional>
67#include <utility>
68
69#define DEBUG_TYPE "simple-loop-unswitch"
70
71using namespace llvm;
72using namespace llvm::PatternMatch;
73
74STATISTIC(NumBranches, "Number of branches unswitched");
75STATISTIC(NumSwitches, "Number of switches unswitched");
76STATISTIC(NumSelects, "Number of selects turned into branches for unswitching");
77STATISTIC(NumGuards, "Number of guards turned into branches for unswitching");
78STATISTIC(NumTrivial, "Number of unswitches that are trivial");
80 NumCostMultiplierSkipped,
81 "Number of unswitch candidates that had their cost multiplier skipped");
82STATISTIC(NumInvariantConditionsInjected,
83 "Number of invariant conditions injected and unswitched");
84
namespace llvm {
// NOTE(review): several cl::opt declarations below are missing their opening
// "static cl::opt<T> Name(" line in this rendering of the file; the argument
// lists are kept exactly as shown. Confirm the option variable names against
// the original source before building.

// Option: forcibly enable non-trivial unswitching regardless of pass config.
    "enable-nontrivial-unswitch", cl::init(false), cl::Hidden,
    cl::desc("Forcibly enables non-trivial loop unswitching rather than "
             "following the configuration passed into the pass."));

// Cost threshold above which unswitching a loop is rejected.
static cl::opt<int>
    UnswitchThreshold("unswitch-threshold", cl::init(50), cl::Hidden,
                      cl::desc("The cost threshold for unswitching a loop."));

// Option: toggle the cost multiplier that damps exponential unswitch growth.
    "enable-unswitch-cost-multiplier", cl::init(true), cl::Hidden,
    cl::desc("Enable unswitch cost multiplier that prohibits exponential "
             "explosion in nontrivial unswitch."));
// Option: sibling-loop divisor used by the cost multiplier.
    "unswitch-siblings-toplevel-div", cl::init(2), cl::Hidden,
    cl::desc("Toplevel siblings divisor for cost multiplier."));
// Option: outer-loop size divisor used by the cost multiplier.
    "unswitch-parent-blocks-div", cl::init(8), cl::Hidden,
    cl::desc("Outer loop size divisor for cost multiplier."));
// Option: number of initial candidates exempt from cost-multiplier scaling.
    "unswitch-num-initial-unscaled-candidates", cl::init(8), cl::Hidden,
    cl::desc("Number of unswitch candidates that are ignored when calculating "
             "cost multiplier."));
// Option: also treat llvm.experimental.guard intrinsics as candidates.
    "simple-loop-unswitch-guards", cl::init(true), cl::Hidden,
    cl::desc("If enabled, simple loop unswitching will also consider "
             "llvm.experimental.guard intrinsics as unswitch candidates."));
// Option: drop make.implicit metadata instead of analyzing whether it can be
// kept on non-trivially unswitched implicit null checks.
    "simple-loop-unswitch-drop-non-trivial-implicit-null-checks",
    cl::init(false), cl::Hidden,
    cl::desc("If enabled, drop make.implicit metadata in unswitched implicit "
             "null checks to save time analyzing if we can keep it."));
// Cap on the number of memory uses explored during partial unswitching.
    MSSAThreshold("simple-loop-unswitch-memoryssa-threshold",
                  cl::desc("Max number of memory uses to explore during "
                           "partial unswitching analysis"),
                  cl::init(100), cl::Hidden);
// Option: freeze unswitched conditions so branching on undef/poison cannot
// introduce miscompiles.
    "freeze-loop-unswitch-cond", cl::init(true), cl::Hidden,
    cl::desc("If enabled, the freeze instruction will be added to condition "
             "of loop unswitch to prevent miscompilation."));

// Option: inject new invariant conditions to eliminate variant ones.
    "simple-loop-unswitch-inject-invariant-conditions", cl::Hidden,
    cl::desc("Whether we should inject new invariants and unswitch them to "
             "eliminate some existing (non-invariant) conditions."),
    cl::init(true));

// Hotness threshold gating invariant-condition injection.
    "simple-loop-unswitch-inject-invariant-condition-hotness-threshold",
    cl::desc("Only try to inject loop invariant conditions and "
             "unswitch on them to eliminate branches that are "
             "not-taken 1/<this option> times or less."),
    cl::init(16));

// When true, estimate/propagate branch profile data for the new branches.
static cl::opt<bool> EstimateProfile("simple-loop-unswitch-estimate-profile",
                                     cl::Hidden, cl::init(true));
} // namespace llvm
146
148namespace {
/// Describes a conditional branch terminator together with a loop-invariant
/// value its condition involves and the branch's in-loop successor.
// NOTE(review): the precise relationship between Term's condition and
// Invariant is established by the code that builds these descriptors, which
// is not visible in this chunk — confirm before relying on field comments.
struct CompareDesc {
  // The conditional branch being described.
  CondBrInst *Term;
  // A loop-invariant value involved in Term's condition.
  Value *Invariant;
  // The successor of Term that remains inside the loop.
  BasicBlock *InLoopSucc;

  CompareDesc(CondBrInst *Term, Value *Invariant, BasicBlock *InLoopSucc)
      : Term(Term), Invariant(Invariant), InLoopSucc(InLoopSucc) {}
};
157
/// A comparison (Pred, LHS, RHS) that is to be materialized ("injected") into
/// the IR so it can be unswitched on, together with the successor the
/// resulting branch should keep inside the loop.
// NOTE(review): semantics inferred from field/type names and from
// NonTrivialUnswitchCandidate::PendingInjection below; confirm against the
// code that consumes these descriptors.
struct InjectedInvariant {
  // Predicate of the comparison to inject.
  ICmpInst::Predicate Pred;
  // Operands of the comparison to inject.
  Value *LHS;
  Value *RHS;
  // The successor that stays inside the loop after unswitching.
  BasicBlock *InLoopSucc;

  InjectedInvariant(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                    BasicBlock *InLoopSucc)
      : Pred(Pred), LHS(LHS), RHS(RHS), InLoopSucc(InLoopSucc) {}
};
168
169struct NonTrivialUnswitchCandidate {
170 Instruction *TI = nullptr;
171 TinyPtrVector<Value *> Invariants;
172 std::optional<InstructionCost> Cost;
173 std::optional<InjectedInvariant> PendingInjection;
174 NonTrivialUnswitchCandidate(
175 Instruction *TI, ArrayRef<Value *> Invariants,
176 std::optional<InstructionCost> Cost = std::nullopt,
177 std::optional<InjectedInvariant> PendingInjection = std::nullopt)
178 : TI(TI), Invariants(Invariants), Cost(Cost),
179 PendingInjection(PendingInjection) {};
180
181 bool hasPendingInjection() const { return PendingInjection.has_value(); }
182};
183} // end anonymous namespace.
184
185// Helper to skip (select x, true, false), which matches both a logical AND and
186// OR and can confuse code that tries to determine if \p Cond is either a
187// logical AND or OR but not both.
189 Value *CondNext;
190 while (match(Cond, m_Select(m_Value(CondNext), m_One(), m_Zero())))
191 Cond = CondNext;
192 return Cond;
193}
194
195/// Collect all of the loop invariant input values transitively used by the
196/// homogeneous instruction graph from a given root.
197///
198/// This essentially walks from a root recursively through loop variant operands
199/// which have perform the same logical operation (AND or OR) and finds all
200/// inputs which are loop invariant. For some operations these can be
201/// re-associated and unswitched out of the loop entirely.
204 const LoopInfo &LI) {
205 assert(!L.isLoopInvariant(&Root) &&
206 "Only need to walk the graph if root itself is not invariant.");
207 TinyPtrVector<Value *> Invariants;
208
209 bool IsRootAnd = match(&Root, m_LogicalAnd());
210 bool IsRootOr = match(&Root, m_LogicalOr());
211
212 // Build a worklist and recurse through operators collecting invariants.
215 Worklist.push_back(&Root);
216 Visited.insert(&Root);
217 do {
218 Instruction &I = *Worklist.pop_back_val();
219 for (Value *OpV : I.operand_values()) {
220 // Skip constants as unswitching isn't interesting for them.
221 if (isa<Constant>(OpV))
222 continue;
223
224 // Add it to our result if loop invariant.
225 if (L.isLoopInvariant(OpV)) {
226 Invariants.push_back(OpV);
227 continue;
228 }
229
230 // If not an instruction with the same opcode, nothing we can do.
232
233 if (OpI && ((IsRootAnd && match(OpI, m_LogicalAnd())) ||
234 (IsRootOr && match(OpI, m_LogicalOr())))) {
235 // Visit this operand.
236 if (Visited.insert(OpI).second)
237 Worklist.push_back(OpI);
238 }
239 }
240 } while (!Worklist.empty());
241
242 return Invariants;
243}
244
245static void replaceLoopInvariantUses(const Loop &L, Value *Invariant,
246 Constant &Replacement) {
247 assert(!isa<Constant>(Invariant) && "Why are we unswitching on a constant?");
248
249 // Replace uses of LIC in the loop with the given constant.
250 // We use make_early_inc_range as set invalidates the iterator.
251 for (Use &U : llvm::make_early_inc_range(Invariant->uses())) {
252 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
253
254 // Replace this use within the loop body.
255 if (UserI && L.contains(UserI))
256 U.set(&Replacement);
257 }
258}
259
260/// Check that all the LCSSA PHI nodes in the loop exit block have trivial
261/// incoming values along this edge.
263 const BasicBlock &ExitingBB,
264 const BasicBlock &ExitBB) {
265 for (const Instruction &I : ExitBB) {
266 auto *PN = dyn_cast<PHINode>(&I);
267 if (!PN)
268 // No more PHIs to check.
269 return true;
270
271 // If the incoming value for this edge isn't loop invariant the unswitch
272 // won't be trivial.
273 if (!L.isLoopInvariant(PN->getIncomingValueForBlock(&ExitingBB)))
274 return false;
275 }
276 llvm_unreachable("Basic blocks should never be empty!");
277}
278
/// Copy a set of loop invariant values \p Invariants and insert them at the
/// end of \p BB and conditionally branch on the copied condition. We only
/// branch on a single value.
/// We attempt to estimate the profile of the resulting conditional branch from
/// \p ComputeProfFrom, which is the original conditional branch we're
/// unswitching.
/// When \p Direction is true, the \p Invariants form a disjunction, and the
/// branch conditioned on it exits the loop on the "true" case. When \p
/// Direction is false, the \p Invariants form a conjunction and the branch
/// exits on the "false" case.
// NOTE(review): the opening line of this function's signature (return type and
// name) is not visible in this rendering; the parameter list below is kept
// exactly as shown. Confirm against the original source.
    BasicBlock &BB, ArrayRef<Value *> Invariants, bool Direction,
    BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, bool InsertFreeze,
    const Instruction *I, AssumptionCache *AC, const DominatorTree &DT,
    const CondBrInst &ComputeProfFrom) {

  // Try to borrow branch weights from the original branch being unswitched.
  SmallVector<uint32_t> BranchWeights;
  bool HasBranchWeights = EstimateProfile && !ProfcheckDisableMetadataFixes &&
                          extractBranchWeights(ComputeProfFrom, BranchWeights);
  // If Direction is true, that means we had a disjunction and that the "true"
  // case exits. The probability of the disjunction of the subset of terms is at
  // most as high as the original one. So, if the probability is higher than the
  // one we'd assign in absence of a profile (i.e. 0.5), we will use 0.5,
  // but if it's lower, we will use the original probability.
  // Conversely, if Direction is false, that means we had a conjunction, and the
  // probability of exiting is captured in the second branch weight. That
  // probability is a disjunction (of the negation of the original terms). The
  // same reasoning applies as above.
  // Issue #165649: should we expect BFI to conserve, and use that to calculate
  // the branch weights?
  if (HasBranchWeights &&
      static_cast<double>(BranchWeights[Direction ? 0 : 1]) /
              static_cast<double>(sum_of(BranchWeights)) >
          0.5)
    HasBranchWeights = false;

  IRBuilder<> IRB(&BB);
  // NOTE(review): a statement is missing here in this rendering (likely
  // debug-location setup on the builder); confirm against the original source.

  SmallVector<Value *> FrozenInvariants;
  for (Value *Inv : Invariants) {
    // Freeze each invariant unless it is already known not to be
    // undef/poison, so branching on it in the new location cannot introduce
    // undefined behavior.
    if (InsertFreeze && !isGuaranteedNotToBeUndefOrPoison(Inv, AC, I, &DT))
      Inv = IRB.CreateFreeze(Inv, Inv->getName() + ".fr");
    FrozenInvariants.push_back(Inv);
  }

  // Disjunction exits on "true"; conjunction exits on "false".
  Value *Cond = Direction ? IRB.CreateOr(FrozenInvariants)
                          : IRB.CreateAnd(FrozenInvariants);
  auto *BR = IRB.CreateCondBr(
      Cond, Direction ? &UnswitchedSucc : &NormalSucc,
      Direction ? &NormalSucc : &UnswitchedSucc,
      HasBranchWeights ? ComputeProfFrom.getMetadata(LLVMContext::MD_prof)
                       : nullptr);
  if (!HasBranchWeights)
  // NOTE(review): the statement guarded by this `if` (presumably acting on BR
  // when no weights could be borrowed) is missing in this rendering; confirm
  // against the original source.
}
335
/// Copy a set of loop invariant values, and conditionally branch on them.
// NOTE(review): this function's signature line (return type and name) is not
// visible in this rendering; the parameter list below is kept exactly as
// shown. Confirm against the original source.
    BasicBlock &BB, ArrayRef<Value *> ToDuplicate, bool Direction,
    BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, Loop &L,
    MemorySSAUpdater *MSSAU, const CondBrInst &OriginalBranch) {
  // NOTE(review): the declaration of `VMap` (the value-to-value map used for
  // the clones below) is missing in this rendering.
  for (auto *Val : reverse(ToDuplicate)) {
    Instruction *Inst = cast<Instruction>(Val);
    Instruction *NewInst = Inst->clone();

    if (const DebugLoc &DL = Inst->getDebugLoc())
      mapAtomInstance(DL, VMap);

    // Clones go at the end of BB; operands are remapped through VMap so the
    // duplicated chain references earlier clones rather than in-loop values.
    NewInst->insertInto(&BB, BB.end());
    RemapInstruction(NewInst, VMap,
    // NOTE(review): the remap-flags argument line is missing in this
    // rendering.
    VMap[Val] = NewInst;

    if (!MSSAU)
      continue;

    // Keep MemorySSA up to date: the cloned access must be anchored to a
    // defining access that is valid outside the loop.
    MemorySSA *MSSA = MSSAU->getMemorySSA();
    if (auto *MemUse =
    // NOTE(review): the initializer of `MemUse` is missing in this rendering.
      auto *DefiningAccess = MemUse->getDefiningAccess();
      // Get the first defining access before the loop.
      while (L.contains(DefiningAccess->getBlock())) {
        // If the defining access is a MemoryPhi, get the incoming
        // value for the pre-header as defining access.
        if (auto *MemPhi = dyn_cast<MemoryPhi>(DefiningAccess))
          DefiningAccess =
              MemPhi->getIncomingValueForBlock(L.getLoopPreheader());
        else
          DefiningAccess = cast<MemoryDef>(DefiningAccess)->getDefiningAccess();
      }
      MSSAU->createMemoryAccessInBB(NewInst, DefiningAccess,
                                    NewInst->getParent(),
      // NOTE(review): the insertion-point argument line is missing in this
      // rendering.
    }
  }

  IRBuilder<> IRB(&BB);
  // NOTE(review): a statement is missing here in this rendering.
  Value *Cond = VMap[ToDuplicate[0]];
  // The expectation is that ToDuplicate[0] is the condition used by the
  // OriginalBranch, case in which we can clone the profile metadata from there.
  auto *ProfData =
  // NOTE(review): part of this initializer's condition is missing in this
  // rendering.
      ToDuplicate[0] == skipTrivialSelect(OriginalBranch.getCondition())
          ? OriginalBranch.getMetadata(LLVMContext::MD_prof)
          : nullptr;
  auto *BR =
      IRB.CreateCondBr(Cond, Direction ? &UnswitchedSucc : &NormalSucc,
                       Direction ? &NormalSucc : &UnswitchedSucc, ProfData);
  if (!ProfData)
  // NOTE(review): the statement guarded by this `if` is missing in this
  // rendering; confirm against the original source.
}
393
394/// Rewrite the PHI nodes in an unswitched loop exit basic block.
395///
396/// Requires that the loop exit and unswitched basic block are the same, and
397/// that the exiting block was a unique predecessor of that block. Rewrites the
398/// PHI nodes in that block such that what were LCSSA PHI nodes become trivial
399/// PHI nodes from the old preheader that now contains the unswitched
400/// terminator.
402 BasicBlock &OldExitingBB,
403 BasicBlock &OldPH) {
404 for (PHINode &PN : UnswitchedBB.phis()) {
405 // When the loop exit is directly unswitched we just need to update the
406 // incoming basic block. We loop to handle weird cases with repeated
407 // incoming blocks, but expect to typically only have one operand here.
408 for (auto i : seq<int>(0, PN.getNumOperands())) {
409 assert(PN.getIncomingBlock(i) == &OldExitingBB &&
410 "Found incoming block different from unique predecessor!");
411 PN.setIncomingBlock(i, &OldPH);
412 }
413 }
414}
415
416/// Rewrite the PHI nodes in the loop exit basic block and the split off
417/// unswitched block.
418///
419/// Because the exit block remains an exit from the loop, this rewrites the
420/// LCSSA PHI nodes in it to remove the unswitched edge and introduces PHI
421/// nodes into the unswitched basic block to select between the value in the
422/// old preheader and the loop exit.
424 BasicBlock &UnswitchedBB,
425 BasicBlock &OldExitingBB,
426 BasicBlock &OldPH,
427 bool FullUnswitch) {
428 assert(&ExitBB != &UnswitchedBB &&
429 "Must have different loop exit and unswitched blocks!");
430 BasicBlock::iterator InsertPt = UnswitchedBB.begin();
431 for (PHINode &PN : ExitBB.phis()) {
432 auto *NewPN = PHINode::Create(PN.getType(), /*NumReservedValues*/ 2,
433 PN.getName() + ".split");
434 NewPN->insertBefore(InsertPt);
435
436 // Walk backwards over the old PHI node's inputs to minimize the cost of
437 // removing each one. We have to do this weird loop manually so that we
438 // create the same number of new incoming edges in the new PHI as we expect
439 // each case-based edge to be included in the unswitched switch in some
440 // cases.
441 // FIXME: This is really, really gross. It would be much cleaner if LLVM
442 // allowed us to create a single entry for a predecessor block without
443 // having separate entries for each "edge" even though these edges are
444 // required to produce identical results.
445 for (int i = PN.getNumIncomingValues() - 1; i >= 0; --i) {
446 if (PN.getIncomingBlock(i) != &OldExitingBB)
447 continue;
448
449 Value *Incoming = PN.getIncomingValue(i);
450 if (FullUnswitch)
451 // No more edge from the old exiting block to the exit block.
452 PN.removeIncomingValue(i);
453
454 NewPN->addIncoming(Incoming, &OldPH);
455 }
456
457 // Now replace the old PHI with the new one and wire the old one in as an
458 // input to the new one.
459 PN.replaceAllUsesWith(NewPN);
460 NewPN->addIncoming(&PN, &ExitBB);
461 }
462}
463
464/// Hoist the current loop up to the innermost loop containing a remaining exit.
465///
466/// Because we've removed an exit from the loop, we may have changed the set of
467/// loops reachable and need to move the current loop up the loop nest or even
468/// to an entirely separate nest.
469static void hoistLoopToNewParent(Loop &L, BasicBlock &Preheader,
470 DominatorTree &DT, LoopInfo &LI,
471 MemorySSAUpdater *MSSAU, ScalarEvolution *SE) {
472 // If the loop is already at the top level, we can't hoist it anywhere.
473 Loop *OldParentL = L.getParentLoop();
474 if (!OldParentL)
475 return;
476
478 L.getExitBlocks(Exits);
479 Loop *NewParentL = nullptr;
480 for (auto *ExitBB : Exits)
481 if (Loop *ExitL = LI.getLoopFor(ExitBB))
482 if (!NewParentL || NewParentL->contains(ExitL))
483 NewParentL = ExitL;
484
485 if (NewParentL == OldParentL)
486 return;
487
488 // The new parent loop (if different) should always contain the old one.
489 if (NewParentL)
490 assert(NewParentL->contains(OldParentL) &&
491 "Can only hoist this loop up the nest!");
492
493 // The preheader will need to move with the body of this loop. However,
494 // because it isn't in this loop we also need to update the primary loop map.
495 assert(OldParentL == LI.getLoopFor(&Preheader) &&
496 "Parent loop of this loop should contain this loop's preheader!");
497 LI.changeLoopFor(&Preheader, NewParentL);
498
499 // Remove this loop from its old parent.
500 OldParentL->removeChildLoop(&L);
501
502 // Add the loop either to the new parent or as a top-level loop.
503 if (NewParentL)
504 NewParentL->addChildLoop(&L);
505 else
506 LI.addTopLevelLoop(&L);
507
508 // Remove this loops blocks from the old parent and every other loop up the
509 // nest until reaching the new parent. Also update all of these
510 // no-longer-containing loops to reflect the nesting change.
511 for (Loop *OldContainingL = OldParentL; OldContainingL != NewParentL;
512 OldContainingL = OldContainingL->getParentLoop()) {
513 llvm::erase_if(OldContainingL->getBlocksVector(),
514 [&](const BasicBlock *BB) {
515 return BB == &Preheader || L.contains(BB);
516 });
517
518 OldContainingL->getBlocksSet().erase(&Preheader);
519 for (BasicBlock *BB : L.blocks())
520 OldContainingL->getBlocksSet().erase(BB);
521
522 // Because we just hoisted a loop out of this one, we have essentially
523 // created new exit paths from it. That means we need to form LCSSA PHI
524 // nodes for values used in the no-longer-nested loop.
525 formLCSSA(*OldContainingL, DT, &LI, SE);
526
527 // We shouldn't need to form dedicated exits because the exit introduced
528 // here is the (just split by unswitching) preheader. However, after trivial
529 // unswitching it is possible to get new non-dedicated exits out of parent
530 // loop so let's conservatively form dedicated exit blocks and figure out
531 // if we can optimize later.
532 formDedicatedExitBlocks(OldContainingL, &DT, &LI, MSSAU,
533 /*PreserveLCSSA*/ true);
534 }
535}
536
537// Return the top-most loop containing ExitBB and having ExitBB as exiting block
538// or the loop containing ExitBB, if there is no parent loop containing ExitBB
539// as exiting block.
541 const LoopInfo &LI) {
542 Loop *TopMost = LI.getLoopFor(ExitBB);
543 Loop *Current = TopMost;
544 while (Current) {
545 if (Current->isLoopExiting(ExitBB))
546 TopMost = Current;
547 Current = Current->getParentLoop();
548 }
549 return TopMost;
550}
551
/// Unswitch a trivial branch if the condition is loop invariant.
///
/// This routine should only be called when loop code leading to the branch has
/// been validated as trivial (no side effects). This routine checks if the
/// condition is invariant and one of the successors is a loop exit. This
/// allows us to unswitch without duplicating the loop, making it trivial.
///
/// If this routine fails to unswitch the branch it returns false.
///
/// If the branch can be unswitched, this routine splits the preheader and
/// hoists the branch above that split. Preserves loop simplified form
/// (splitting the exit block as necessary). It simplifies the branch within
/// the loop to an unconditional branch but doesn't remove it entirely. Further
/// cleanup can be done with some simplifycfg like pass.
///
/// If `SE` is not null, it will be updated based on the potential loop SCEVs
/// invalidated by this.
// NOTE(review): the first line of this function's signature (return type,
// name, and the leading parameters such as the Loop `L`, the branch `BI`, and
// the DominatorTree `DT` referenced in the body) is not visible in this
// rendering; the trailing parameters are kept exactly as shown.
                                  LoopInfo &LI, ScalarEvolution *SE,
                                  MemorySSAUpdater *MSSAU) {
  LLVM_DEBUG(dbgs() << " Trying to unswitch branch: " << BI << "\n");

  // The loop invariant values that we want to unswitch.
  TinyPtrVector<Value *> Invariants;

  // When true, we're fully unswitching the branch rather than just unswitching
  // some input conditions to the branch.
  bool FullUnswitch = false;

  // NOTE(review): the definition of `Cond` is missing in this rendering
  // (presumably the branch condition with trivial selects skipped); confirm
  // against the original source.
  if (L.isLoopInvariant(Cond)) {
    Invariants.push_back(Cond);
    FullUnswitch = true;
  } else {
    if (auto *CondInst = dyn_cast<Instruction>(Cond))
      Invariants = collectHomogenousInstGraphLoopInvariants(L, *CondInst, LI);
    if (Invariants.empty()) {
      LLVM_DEBUG(dbgs() << " Couldn't find invariant inputs!\n");
      return false;
    }
  }

  // Check that one of the branch's successors exits, and which one.
  bool ExitDirection = true;
  int LoopExitSuccIdx = 0;
  auto *LoopExitBB = BI.getSuccessor(0);
  if (L.contains(LoopExitBB)) {
    ExitDirection = false;
    LoopExitSuccIdx = 1;
    LoopExitBB = BI.getSuccessor(1);
    if (L.contains(LoopExitBB)) {
      LLVM_DEBUG(dbgs() << " Branch doesn't exit the loop!\n");
      return false;
    }
  }
  auto *ContinueBB = BI.getSuccessor(1 - LoopExitSuccIdx);
  auto *ParentBB = BI.getParent();
  if (!areLoopExitPHIsLoopInvariant(L, *ParentBB, *LoopExitBB)) {
    LLVM_DEBUG(dbgs() << " Loop exit PHI's aren't loop-invariant!\n");
    return false;
  }

  // When unswitching only part of the branch's condition, we need the exit
  // block to be reached directly from the partially unswitched input. This can
  // be done when the exit block is along the true edge and the branch condition
  // is a graph of `or` operations, or the exit block is along the false edge
  // and the condition is a graph of `and` operations.
  if (!FullUnswitch) {
    if (ExitDirection ? !match(Cond, m_LogicalOr())
                      : !match(Cond, m_LogicalAnd())) {
      LLVM_DEBUG(dbgs() << " Branch condition is in improper form for "
                           "non-full unswitch!\n");
      return false;
    }
  }

  LLVM_DEBUG({
    dbgs() << " unswitching trivial invariant conditions for: " << BI
           << "\n";
    for (Value *Invariant : Invariants) {
      dbgs() << " " << *Invariant << " == true";
      if (Invariant != Invariants.back())
        dbgs() << " ||";
      dbgs() << "\n";
    }
  });

  // If we have scalar evolutions, we need to invalidate them including this
  // loop, the loop containing the exit block and the topmost parent loop
  // exiting via LoopExitBB.
  if (SE) {
    if (const Loop *ExitL = getTopMostExitingLoop(LoopExitBB, LI))
      SE->forgetLoop(ExitL);
    else
      // Forget the entire nest as this exits the entire nest.
      SE->forgetTopmostLoop(&L);
    // NOTE(review): an additional statement is missing here in this rendering
    // (likely further SCEV invalidation); confirm against the original source.
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Split the preheader, so that we know that there is a safe place to insert
  // the conditional branch. We will change the preheader to have a conditional
  // branch on LoopCond.
  BasicBlock *OldPH = L.getLoopPreheader();
  BasicBlock *NewPH = SplitEdge(OldPH, L.getHeader(), &DT, &LI, MSSAU);

  // Now that we have a place to insert the conditional branch, create a place
  // to branch to: this is the exit block out of the loop that we are
  // unswitching. We need to split this if there are other loop predecessors.
  // Because the loop is in simplified form, *any* other predecessor is enough.
  BasicBlock *UnswitchedBB;
  if (FullUnswitch && LoopExitBB->getUniquePredecessor()) {
    assert(LoopExitBB->getUniquePredecessor() == BI.getParent() &&
           "A branch's parent isn't a predecessor!");
    UnswitchedBB = LoopExitBB;
  } else {
    UnswitchedBB =
        SplitBlock(LoopExitBB, LoopExitBB->begin(), &DT, &LI, MSSAU, "");
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Actually move the invariant uses into the unswitched position. If possible,
  // we do this by moving the instructions, but when doing partial unswitching
  // we do it by building a new merge of the values in the unswitched position.
  OldPH->getTerminator()->eraseFromParent();
  if (FullUnswitch) {
    // If fully unswitching, we can use the existing branch instruction.
    // Splice it into the old PH to gate reaching the new preheader and re-point
    // its successors.
    BI.moveBefore(*OldPH, OldPH->end());
    BI.setCondition(Cond);
    if (MSSAU) {
      // Temporarily clone the terminator, to make MSSA update cheaper by
      // separating "insert edge" updates from "remove edge" ones.
      BI.clone()->insertInto(ParentBB, ParentBB->end());
    } else {
      // Create a new unconditional branch that will continue the loop as a new
      // terminator.
      Instruction *NewBI = UncondBrInst::Create(ContinueBB, ParentBB);
      NewBI->setDebugLoc(BI.getDebugLoc());
    }
    BI.setSuccessor(LoopExitSuccIdx, UnswitchedBB);
    BI.setSuccessor(1 - LoopExitSuccIdx, NewPH);
  } else {
    // Only unswitching a subset of inputs to the condition, so we will need to
    // build a new branch that merges the invariant inputs.
    if (ExitDirection)
      // NOTE(review): the assert's opening line is missing in this rendering;
      // only its message strings survive below.
             "Must have an `or` of `i1`s or `select i1 X, true, Y`s for the "
             "condition!");
    else
      // NOTE(review): same as above for the conjunction form.
             "Must have an `and` of `i1`s or `select i1 X, Y, false`s for the"
             " condition!");
    // NOTE(review): the line naming the helper called with the arguments below
    // is missing in this rendering (the argument shapes match
    // buildPartialUnswitchConditionalBranch above); confirm against the
    // original source.
        *OldPH, Invariants, ExitDirection, *UnswitchedBB, *NewPH,
        FreezeLoopUnswitchCond, OldPH->getTerminatorOrNull(), nullptr, DT, BI);
  }

  // Update the dominator tree with the added edge.
  DT.insertEdge(OldPH, UnswitchedBB);

  // After the dominator tree was updated with the added edge, update MemorySSA
  // if available.
  if (MSSAU) {
    // NOTE(review): the declaration of `Updates` is missing in this rendering.
    Updates.push_back({cfg::UpdateKind::Insert, OldPH, UnswitchedBB});
    MSSAU->applyInsertUpdates(Updates, DT);
  }

  // Finish updating dominator tree and memory ssa for full unswitch.
  if (FullUnswitch) {
    if (MSSAU) {
      Instruction *Term = ParentBB->getTerminator();
      // Remove the cloned branch instruction and create unconditional branch
      // now.
      Instruction *NewBI = UncondBrInst::Create(ContinueBB, ParentBB);
      NewBI->setDebugLoc(Term->getDebugLoc());
      Term->eraseFromParent();
      MSSAU->removeEdge(ParentBB, LoopExitBB);
    }
    DT.deleteEdge(ParentBB, LoopExitBB);
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Rewrite the relevant PHI nodes.
  if (UnswitchedBB == LoopExitBB)
    rewritePHINodesForUnswitchedExitBlock(*UnswitchedBB, *ParentBB, *OldPH);
  else
    rewritePHINodesForExitAndUnswitchedBlocks(*LoopExitBB, *UnswitchedBB,
                                              *ParentBB, *OldPH, FullUnswitch);

  // The constant we can replace all of our invariants with inside the loop
  // body. If any of the invariants have a value other than this the loop won't
  // be entered.
  ConstantInt *Replacement = ExitDirection
                                 ? ConstantInt::getFalse(BI.getContext())
                                 : ConstantInt::getTrue(BI.getContext());

  // Since this is an i1 condition we can also trivially replace uses of it
  // within the loop with a constant.
  for (Value *Invariant : Invariants)
    replaceLoopInvariantUses(L, Invariant, *Replacement);

  // If this was full unswitching, we may have changed the nesting relationship
  // for this loop so hoist it to its correct parent if needed.
  if (FullUnswitch)
    hoistLoopToNewParent(L, *NewPH, DT, LI, MSSAU, SE);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  LLVM_DEBUG(dbgs() << " done: unswitching trivial branch...\n");
  ++NumTrivial;
  ++NumBranches;
  return true;
}
775
776/// Unswitch a trivial switch if the condition is loop invariant.
777///
778/// This routine should only be called when loop code leading to the switch has
779/// been validated as trivial (no side effects). This routine checks if the
780/// condition is invariant and that at least one of the successors is a loop
781/// exit. This allows us to unswitch without duplicating the loop, making it
782/// trivial.
783///
784/// If this routine fails to unswitch the switch it returns false.
785///
786/// If the switch can be unswitched, this routine splits the preheader and
787/// copies the switch above that split. If the default case is one of the
788/// exiting cases, it copies the non-exiting cases and points them at the new
789/// preheader. If the default case is not exiting, it copies the exiting cases
790/// and points the default at the preheader. It preserves loop simplified form
791/// (splitting the exit blocks as necessary). It simplifies the switch within
792/// the loop by removing now-dead cases. If the default case is one of those
793/// unswitched, it replaces its destination with a new basic block containing
794/// only unreachable. Such basic blocks, while technically loop exits, are not
795/// considered for unswitching so this is a stable transform and the same
796/// switch will not be revisited. If after unswitching there is only a single
797/// in-loop successor, the switch is further simplified to an unconditional
798/// branch. Still more cleanup can be done with some simplifycfg like pass.
799///
800/// If `SE` is not null, it will be updated based on the potential loop SCEVs
801/// invalidated by this.
803 LoopInfo &LI, ScalarEvolution *SE,
804 MemorySSAUpdater *MSSAU) {
805 LLVM_DEBUG(dbgs() << " Trying to unswitch switch: " << SI << "\n");
806 Value *LoopCond = SI.getCondition();
807
808 // If this isn't switching on an invariant condition, we can't unswitch it.
809 if (!L.isLoopInvariant(LoopCond))
810 return false;
811
812 auto *ParentBB = SI.getParent();
813
814 // The same check must be used both for the default and the exit cases. We
815 // should never leave edges from the switch instruction to a basic block that
816 // we are unswitching, hence the condition used to determine the default case
817 // needs to also be used to populate ExitCaseIndices, which is then used to
818 // remove cases from the switch.
819 auto IsTriviallyUnswitchableExitBlock = [&](BasicBlock &BBToCheck) {
820 // BBToCheck is not an exit block if it is inside loop L.
821 if (L.contains(&BBToCheck))
822 return false;
823 // BBToCheck is not trivial to unswitch if its phis aren't loop invariant.
824 if (!areLoopExitPHIsLoopInvariant(L, *ParentBB, BBToCheck))
825 return false;
826 // We do not unswitch a block that only has an unreachable statement, as
827 // it's possible this is a previously unswitched block. Only unswitch if
828 // either the terminator is not unreachable, or, if it is, it's not the only
829 // instruction in the block.
830 auto *TI = BBToCheck.getTerminator();
831 bool isUnreachable = isa<UnreachableInst>(TI);
832 return !isUnreachable || &*BBToCheck.getFirstNonPHIOrDbg() != TI;
833 };
834
835 SmallVector<int, 4> ExitCaseIndices;
836 for (auto Case : SI.cases())
837 if (IsTriviallyUnswitchableExitBlock(*Case.getCaseSuccessor()))
838 ExitCaseIndices.push_back(Case.getCaseIndex());
839 BasicBlock *DefaultExitBB = nullptr;
842 if (IsTriviallyUnswitchableExitBlock(*SI.getDefaultDest())) {
843 DefaultExitBB = SI.getDefaultDest();
844 } else if (ExitCaseIndices.empty())
845 return false;
846
847 LLVM_DEBUG(dbgs() << " unswitching trivial switch...\n");
848
849 if (MSSAU && VerifyMemorySSA)
850 MSSAU->getMemorySSA()->verifyMemorySSA();
851
852 // We may need to invalidate SCEVs for the outermost loop reached by any of
853 // the exits.
854 Loop *OuterL = &L;
855
856 if (DefaultExitBB) {
857 // Check the loop containing this exit.
858 Loop *ExitL = getTopMostExitingLoop(DefaultExitBB, LI);
859 if (!ExitL || ExitL->contains(OuterL))
860 OuterL = ExitL;
861 }
862 for (unsigned Index : ExitCaseIndices) {
863 auto CaseI = SI.case_begin() + Index;
864 // Compute the outer loop from this exit.
865 Loop *ExitL = getTopMostExitingLoop(CaseI->getCaseSuccessor(), LI);
866 if (!ExitL || ExitL->contains(OuterL))
867 OuterL = ExitL;
868 }
869
870 if (SE) {
871 if (OuterL)
872 SE->forgetLoop(OuterL);
873 else
874 SE->forgetTopmostLoop(&L);
875 }
876
877 if (DefaultExitBB) {
878 // Clear out the default destination temporarily to allow accurate
879 // predecessor lists to be examined below.
880 SI.setDefaultDest(nullptr);
881 }
882
883 // Store the exit cases into a separate data structure and remove them from
884 // the switch.
885 SmallVector<std::tuple<ConstantInt *, BasicBlock *,
887 4> ExitCases;
888 ExitCases.reserve(ExitCaseIndices.size());
890 // We walk the case indices backwards so that we remove the last case first
891 // and don't disrupt the earlier indices.
892 for (unsigned Index : reverse(ExitCaseIndices)) {
893 auto CaseI = SI.case_begin() + Index;
894 // Save the value of this case.
895 auto W = SIW.getSuccessorWeight(CaseI->getSuccessorIndex());
896 ExitCases.emplace_back(CaseI->getCaseValue(), CaseI->getCaseSuccessor(), W);
897 // Delete the unswitched cases.
898 SIW.removeCase(CaseI);
899 }
900
901 // Check if after this all of the remaining cases point at the same
902 // successor.
903 BasicBlock *CommonSuccBB = nullptr;
904 if (SI.getNumCases() > 0 &&
905 all_of(drop_begin(SI.cases()), [&SI](const SwitchInst::CaseHandle &Case) {
906 return Case.getCaseSuccessor() == SI.case_begin()->getCaseSuccessor();
907 }))
908 CommonSuccBB = SI.case_begin()->getCaseSuccessor();
909 if (!DefaultExitBB) {
910 // If we're not unswitching the default, we need it to match any cases to
911 // have a common successor or if we have no cases it is the common
912 // successor.
913 if (SI.getNumCases() == 0)
914 CommonSuccBB = SI.getDefaultDest();
915 else if (SI.getDefaultDest() != CommonSuccBB)
916 CommonSuccBB = nullptr;
917 }
918
919 // Split the preheader, so that we know that there is a safe place to insert
920 // the switch.
921 BasicBlock *OldPH = L.getLoopPreheader();
922 BasicBlock *NewPH = SplitEdge(OldPH, L.getHeader(), &DT, &LI, MSSAU);
923 OldPH->getTerminator()->eraseFromParent();
924
925 // Now add the unswitched switch. This new switch instruction inherits the
926 // debug location of the old switch, because it semantically replace the old
927 // one.
928 auto *NewSI = SwitchInst::Create(LoopCond, NewPH, ExitCases.size(), OldPH);
929 NewSI->setDebugLoc(SIW->getDebugLoc());
930 SwitchInstProfUpdateWrapper NewSIW(*NewSI);
931
932 // Rewrite the IR for the unswitched basic blocks. This requires two steps.
933 // First, we split any exit blocks with remaining in-loop predecessors. Then
934 // we update the PHIs in one of two ways depending on if there was a split.
935 // We walk in reverse so that we split in the same order as the cases
936 // appeared. This is purely for convenience of reading the resulting IR, but
937 // it doesn't cost anything really.
938 SmallPtrSet<BasicBlock *, 2> UnswitchedExitBBs;
940 // Handle the default exit if necessary.
941 // FIXME: It'd be great if we could merge this with the loop below but LLVM's
942 // ranges aren't quite powerful enough yet.
943 if (DefaultExitBB) {
944 if (pred_empty(DefaultExitBB)) {
945 UnswitchedExitBBs.insert(DefaultExitBB);
946 rewritePHINodesForUnswitchedExitBlock(*DefaultExitBB, *ParentBB, *OldPH);
947 } else {
948 auto *SplitBB =
949 SplitBlock(DefaultExitBB, DefaultExitBB->begin(), &DT, &LI, MSSAU);
950 rewritePHINodesForExitAndUnswitchedBlocks(*DefaultExitBB, *SplitBB,
951 *ParentBB, *OldPH,
952 /*FullUnswitch*/ true);
953 DefaultExitBB = SplitExitBBMap[DefaultExitBB] = SplitBB;
954 }
955 }
956 // Note that we must use a reference in the for loop so that we update the
957 // container.
958 for (auto &ExitCase : reverse(ExitCases)) {
959 // Grab a reference to the exit block in the pair so that we can update it.
960 BasicBlock *ExitBB = std::get<1>(ExitCase);
961
962 // If this case is the last edge into the exit block, we can simply reuse it
963 // as it will no longer be a loop exit. No mapping necessary.
964 if (pred_empty(ExitBB)) {
965 // Only rewrite once.
966 if (UnswitchedExitBBs.insert(ExitBB).second)
967 rewritePHINodesForUnswitchedExitBlock(*ExitBB, *ParentBB, *OldPH);
968 continue;
969 }
970
971 // Otherwise we need to split the exit block so that we retain an exit
972 // block from the loop and a target for the unswitched condition.
973 BasicBlock *&SplitExitBB = SplitExitBBMap[ExitBB];
974 if (!SplitExitBB) {
975 // If this is the first time we see this, do the split and remember it.
976 SplitExitBB = SplitBlock(ExitBB, ExitBB->begin(), &DT, &LI, MSSAU);
977 rewritePHINodesForExitAndUnswitchedBlocks(*ExitBB, *SplitExitBB,
978 *ParentBB, *OldPH,
979 /*FullUnswitch*/ true);
980 }
981 // Update the case pair to point to the split block.
982 std::get<1>(ExitCase) = SplitExitBB;
983 }
984
985 // Now add the unswitched cases. We do this in reverse order as we built them
986 // in reverse order.
987 for (auto &ExitCase : reverse(ExitCases)) {
988 ConstantInt *CaseVal = std::get<0>(ExitCase);
989 BasicBlock *UnswitchedBB = std::get<1>(ExitCase);
990
991 NewSIW.addCase(CaseVal, UnswitchedBB, std::get<2>(ExitCase));
992 }
993
994 // If the default was unswitched, re-point it and add explicit cases for
995 // entering the loop.
996 if (DefaultExitBB) {
997 NewSIW->setDefaultDest(DefaultExitBB);
998 NewSIW.setSuccessorWeight(0, DefaultCaseWeight);
999
1000 // We removed all the exit cases, so we just copy the cases to the
1001 // unswitched switch.
1002 for (const auto &Case : SI.cases())
1003 NewSIW.addCase(Case.getCaseValue(), NewPH,
1005 } else if (DefaultCaseWeight) {
1006 // We have to set branch weight of the default case.
1007 uint64_t SW = *DefaultCaseWeight;
1008 for (const auto &Case : SI.cases()) {
1009 auto W = SIW.getSuccessorWeight(Case.getSuccessorIndex());
1010 assert(W &&
1011 "case weight must be defined as default case weight is defined");
1012 SW += *W;
1013 }
1014 NewSIW.setSuccessorWeight(0, SW);
1015 }
1016
1017 // If we ended up with a common successor for every path through the switch
1018 // after unswitching, rewrite it to an unconditional branch to make it easy
1019 // to recognize. Otherwise we potentially have to recognize the default case
1020 // pointing at unreachable and other complexity.
1021 if (CommonSuccBB) {
1022 BasicBlock *BB = SI.getParent();
1023 // We may have had multiple edges to this common successor block, so remove
1024 // them as predecessors. We skip the first one, either the default or the
1025 // actual first case.
1026 bool SkippedFirst = DefaultExitBB == nullptr;
1027 for (auto Case : SI.cases()) {
1028 assert(Case.getCaseSuccessor() == CommonSuccBB &&
1029 "Non-common successor!");
1030 (void)Case;
1031 if (!SkippedFirst) {
1032 SkippedFirst = true;
1033 continue;
1034 }
1035 CommonSuccBB->removePredecessor(BB,
1036 /*KeepOneInputPHIs*/ true);
1037 }
1038 // Now nuke the switch and replace it with a direct branch.
1039 Instruction *NewBI = UncondBrInst::Create(CommonSuccBB, BB);
1040 NewBI->setDebugLoc(SIW->getDebugLoc());
1041 SIW.eraseFromParent();
1042 } else if (DefaultExitBB) {
1043 assert(SI.getNumCases() > 0 &&
1044 "If we had no cases we'd have a common successor!");
1045 // Move the last case to the default successor. This is valid as if the
1046 // default got unswitched it cannot be reached. This has the advantage of
1047 // being simple and keeping the number of edges from this switch to
1048 // successors the same, and avoiding any PHI update complexity.
1049 auto LastCaseI = std::prev(SI.case_end());
1050
1051 SI.setDefaultDest(LastCaseI->getCaseSuccessor());
1053 0, SIW.getSuccessorWeight(LastCaseI->getSuccessorIndex()));
1054 SIW.removeCase(LastCaseI);
1055 }
1056
1057 // Walk the unswitched exit blocks and the unswitched split blocks and update
1058 // the dominator tree based on the CFG edits. While we are walking unordered
1059 // containers here, the API for applyUpdates takes an unordered list of
1060 // updates and requires them to not contain duplicates.
1062 for (auto *UnswitchedExitBB : UnswitchedExitBBs) {
1063 DTUpdates.push_back({DT.Delete, ParentBB, UnswitchedExitBB});
1064 DTUpdates.push_back({DT.Insert, OldPH, UnswitchedExitBB});
1065 }
1066 for (auto SplitUnswitchedPair : SplitExitBBMap) {
1067 DTUpdates.push_back({DT.Delete, ParentBB, SplitUnswitchedPair.first});
1068 DTUpdates.push_back({DT.Insert, OldPH, SplitUnswitchedPair.second});
1069 }
1070
1071 if (MSSAU) {
1072 MSSAU->applyUpdates(DTUpdates, DT, /*UpdateDT=*/true);
1073 if (VerifyMemorySSA)
1074 MSSAU->getMemorySSA()->verifyMemorySSA();
1075 } else {
1076 DT.applyUpdates(DTUpdates);
1077 }
1078
1079 assert(DT.verify(DominatorTree::VerificationLevel::Fast));
1080
1081 // We may have changed the nesting relationship for this loop so hoist it to
1082 // its correct parent if needed.
1083 hoistLoopToNewParent(L, *NewPH, DT, LI, MSSAU, SE);
1084
1085 if (MSSAU && VerifyMemorySSA)
1086 MSSAU->getMemorySSA()->verifyMemorySSA();
1087
1088 ++NumTrivial;
1089 ++NumSwitches;
1090 LLVM_DEBUG(dbgs() << " done: unswitching trivial switch...\n");
1091 return true;
1092}
1093
1094/// This routine scans the loop to find a branch or switch which occurs before
1095/// any side effects occur. These can potentially be unswitched without
1096/// duplicating the loop. If a branch or switch is successfully unswitched the
1097/// scanning continues to see if subsequent branches or switches have become
1098/// trivial. Once all trivial candidates have been unswitched, this routine
1099/// returns.
1100///
1101/// The return value indicates whether anything was unswitched (and therefore
1102/// changed).
1103///
1104/// If `SE` is not null, it will be updated based on the potential loop SCEVs
1105/// invalidated by this.
// NOTE(review): the first line of this signature (function name and the
// leading `Loop &L, DominatorTree &DT` parameters) is not visible in this
// view — confirm against the full file.
1107 LoopInfo &LI, ScalarEvolution *SE,
1108 MemorySSAUpdater *MSSAU) {
// Set to true as soon as any candidate is unswitched; returned so the caller
// knows analyses may need invalidation.
1109 bool Changed = false;
1110
1111 // If loop header has only one reachable successor we should keep looking for
1112 // trivial condition candidates in the successor as well. An alternative is
1113 // to constant fold conditions and merge successors into loop header (then we
1114 // only need to check header's terminator). The reason for not doing this in
1115 // LoopUnswitch pass is that it could potentially break LoopPassManager's
1116 // invariants. Folding dead branches could either eliminate the current loop
1117 // or make other loops unreachable. LCSSA form might also not be preserved
1118 // after deleting branches. The following code keeps traversing loop header's
1119 // successors until it finds the trivial condition candidate (condition that
1120 // is not a constant). Since unswitching generates branches with constant
1121 // conditions, this scenario could be very common in practice.
1122 BasicBlock *CurrentBB = L.getHeader();
// NOTE(review): `Visited` is declared on a line not shown in this view; it
// records blocks already examined so the do/while walk below terminates.
1124 Visited.insert(CurrentBB);
1125 do {
1126 // Check if there are any side-effecting instructions (e.g. stores, calls,
1127 // volatile loads) in the part of the loop that the code *would* execute
1128 // without unswitching.
1129 if (MSSAU) // Possible early exit with MSSA
// A def list containing anything other than a single MemoryPhi implies the
// block has a memory write, so unswitching cannot trivially hoist past it.
1130 if (auto *Defs = MSSAU->getMemorySSA()->getBlockDefs(CurrentBB))
1131 if (!isa<MemoryPhi>(*Defs->begin()) || (++Defs->begin() != Defs->end()))
1132 return Changed;
1133 if (llvm::any_of(*CurrentBB,
1134 [](Instruction &I) { return I.mayHaveSideEffects(); }))
1135 return Changed;
1136
1137 Instruction *CurrentTerm = CurrentBB->getTerminator();
1138
1139 if (auto *SI = dyn_cast<SwitchInst>(CurrentTerm)) {
1140 // Don't bother trying to unswitch past a switch with a constant
1141 // condition. This should be removed prior to running this pass by
1142 // simplifycfg.
1143 if (isa<Constant>(SI->getCondition()))
1144 return Changed;
1145
1146 if (!unswitchTrivialSwitch(L, *SI, DT, LI, SE, MSSAU))
1147 // Couldn't unswitch this one so we're done.
1148 return Changed;
1149
1150 // Mark that we managed to unswitch something.
1151 Changed = true;
1152
1153 // If unswitching turned the terminator into an unconditional branch then
1154 // we can continue. The unswitching logic specifically works to fold any
1155 // cases it can into an unconditional branch to make it easier to
1156 // recognize here.
1157 auto *BI = dyn_cast<UncondBrInst>(CurrentBB->getTerminator());
1158 if (!BI)
1159 return Changed;
1160
1161 CurrentBB = BI->getSuccessor();
1162 continue;
1163 }
1164
1165 auto *BI = dyn_cast<CondBrInst>(CurrentTerm);
1166 if (!BI)
1167 // We do not understand other terminator instructions.
1168 return Changed;
1169
1170 // Don't bother trying to unswitch past an unconditional branch or a branch
1171 // with a constant value. These should be removed by simplifycfg prior to
1172 // running this pass.
1173 if (isa<Constant>(skipTrivialSelect(BI->getCondition())))
1174 return Changed;
1175
1176 // Found a trivial condition candidate: non-foldable conditional branch. If
1177 // we fail to unswitch this, we can't do anything else that is trivial.
1178 if (!unswitchTrivialBranch(L, *BI, DT, LI, SE, MSSAU))
1179 return Changed;
1180
1181 // Mark that we managed to unswitch something.
1182 Changed = true;
1183
1184 // If we only unswitched some of the conditions feeding the branch, we won't
1185 // have collapsed it to a single successor.
1186 if (isa<CondBrInst>(CurrentBB->getTerminator()))
1187 return Changed;
1188
1189 // Follow the newly unconditional branch into its successor.
1190 CurrentBB = cast<UncondBrInst>(CurrentBB->getTerminator())->getSuccessor();
1191
1192 // When continuing, if we exit the loop or reach a previous visited block,
1193 // then we can not reach any trivial condition candidates (unfoldable
1194 // branch instructions or switch instructions) and no unswitch can happen.
1195 } while (L.contains(CurrentBB) && Visited.insert(CurrentBB).second);
1196
1197 return Changed;
1198}
1199
1200/// Build the cloned blocks for an unswitched copy of the given loop.
1201///
1202/// The cloned blocks are inserted before the loop preheader (`LoopPH`) and
1203/// after the split block (`SplitBB`) that will be used to select between the
1204/// cloned and original loop.
1205///
1206/// This routine handles cloning all of the necessary loop blocks and exit
1207/// blocks including rewriting their instructions and the relevant PHI nodes.
1208/// Any loop blocks or exit blocks which are dominated by a different successor
1209/// than the one for this clone of the loop blocks can be trivially skipped. We
1210/// use the `DominatingSucc` map to determine whether a block satisfies that
1211/// property with a simple map lookup.
1212///
1213/// It also correctly creates the unconditional branch in the cloned
1214/// unswitched parent block to only point at the unswitched successor.
1215///
1216/// This does not handle most of the necessary updates to `LoopInfo`. Only exit
1217/// block splitting is correctly reflected in `LoopInfo`, essentially all of
1218/// the cloned blocks (and their loops) are left without full `LoopInfo`
1219/// updates. This also doesn't fully update `DominatorTree`. It adds the cloned
1220/// blocks to them but doesn't create the cloned `DominatorTree` structure and
1221/// instead the caller must recompute an accurate DT. It *does* correctly
1222/// update the `AssumptionCache` provided in `AC`.
// NOTE(review): the function name line and several parameter lines
// (including the declarations of `DominatingSucc`, `AC`, and the `NewBlocks`
// output vector) are not visible in this view — confirm against the full
// file before relying on this signature.
1224 Loop &L, BasicBlock *LoopPH, BasicBlock *SplitBB,
1225 ArrayRef<BasicBlock *> ExitBlocks, BasicBlock *ParentBB,
1226 BasicBlock *UnswitchedSuccBB, BasicBlock *ContinueSuccBB,
1228 ValueToValueMapTy &VMap,
1230 DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU,
1231 ScalarEvolution *SE) {
// Reserve for every loop block plus one split block per exit; `NewBlocks`
// accumulates all blocks cloned below.
1233 NewBlocks.reserve(L.getNumBlocks() + ExitBlocks.size())
1234
1235 // We will need to clone a bunch of blocks, wrap up the clone operation in
1236 // a helper.
1237 auto CloneBlock = [&](BasicBlock *OldBB) {
1238 // Clone the basic block and insert it before the new preheader.
1239 BasicBlock *NewBB = CloneBasicBlock(OldBB, VMap, ".us", OldBB->getParent());
1240 NewBB->moveBefore(LoopPH);
1241
1242 // Record this block and the mapping.
1243 NewBlocks.push_back(NewBB);
1244 VMap[OldBB] = NewBB;
1245
1246 return NewBB;
1247 };
1248
1249 // We skip cloning blocks when they have a dominating succ that is not the
1250 // succ we are cloning for.
1251 auto SkipBlock = [&](BasicBlock *BB) {
1252 auto It = DominatingSucc.find(BB);
1253 return It != DominatingSucc.end() && It->second != UnswitchedSuccBB;
1254 };
1255
1256 // First, clone the preheader.
1257 auto *ClonedPH = CloneBlock(LoopPH);
1258
1259 // Then clone all the loop blocks, skipping the ones that aren't necessary.
1260 for (auto *LoopBB : L.blocks())
1261 if (!SkipBlock(LoopBB))
1262 CloneBlock(LoopBB);
1263
1264 // Split all the loop exit edges so that when we clone the exit blocks, if
1265 // any of the exit blocks are *also* a preheader for some other loop, we
1266 // don't create multiple predecessors entering the loop header.
1267 for (auto *ExitBB : ExitBlocks) {
1268 if (SkipBlock(ExitBB))
1269 continue;
1270
1271 // When we are going to clone an exit, we don't need to clone all the
1272 // instructions in the exit block and we want to ensure we have an easy
1273 // place to merge the CFG, so split the exit first. This is always safe to
1274 // do because there cannot be any non-loop predecessors of a loop exit in
1275 // loop simplified form.
1276 auto *MergeBB = SplitBlock(ExitBB, ExitBB->begin(), &DT, &LI, MSSAU);
1277
1278 // Rearrange the names to make it easier to write test cases by having the
1279 // exit block carry the suffix rather than the merge block carrying the
1280 // suffix.
1281 MergeBB->takeName(ExitBB);
1282 ExitBB->setName(Twine(MergeBB->getName()) + ".split");
1283
1284 // Now clone the original exit block.
1285 auto *ClonedExitBB = CloneBlock(ExitBB);
1286 assert(ClonedExitBB->getTerminator()->getNumSuccessors() == 1 &&
1287 "Exit block should have been split to have one successor!");
1288 assert(ClonedExitBB->getTerminator()->getSuccessor(0) == MergeBB &&
1289 "Cloned exit block has the wrong successor!");
1290
1291 // Remap any cloned instructions and create a merge phi node for them.
1292 for (auto ZippedInsts : llvm::zip_first(
1293 llvm::make_range(ExitBB->begin(), std::prev(ExitBB->end())),
1294 llvm::make_range(ClonedExitBB->begin(),
1295 std::prev(ClonedExitBB->end())))) {
1296 Instruction &I = std::get<0>(ZippedInsts);
1297 Instruction &ClonedI = std::get<1>(ZippedInsts);
1298
1299 // The only instructions in the exit block should be PHI nodes and
1300 // potentially a landing pad.
1301 assert(
1303 "Bad instruction in exit block!");
1304 // We should have a value map between the instruction and its clone.
1305 assert(VMap.lookup(&I) == &ClonedI && "Mismatch in the value map!");
1306
1307 // Forget SCEVs based on exit phis in case SCEV looked through the phi.
1308 if (SE)
1309 if (auto *PN = dyn_cast<PHINode>(&I))
1311
// Stitch the original and cloned exit values together with a PHI in the
// merge block, then redirect all users of the original value to it.
1312 BasicBlock::iterator InsertPt = MergeBB->getFirstInsertionPt();
1313
1314 auto *MergePN =
1315 PHINode::Create(I.getType(), /*NumReservedValues*/ 2, ".us-phi");
1316 MergePN->insertBefore(InsertPt);
1317 MergePN->setDebugLoc(InsertPt->getDebugLoc());
1318 I.replaceAllUsesWith(MergePN);
1319 MergePN->addIncoming(&I, ExitBB);
1320 MergePN->addIncoming(&ClonedI, ClonedExitBB);
1321 }
1322 }
1323
1324 // Rewrite the instructions in the cloned blocks to refer to the instructions
1325 // in the cloned blocks. We have to do this as a second pass so that we have
1326 // everything available. Also, we have inserted new instructions which may
1327 // include assume intrinsics, so we update the assumption cache while
1328 // processing this.
1329 Module *M = ClonedPH->getParent()->getParent();
1330 for (auto *ClonedBB : NewBlocks)
1331 for (Instruction &I : *ClonedBB) {
1332 RemapDbgRecordRange(M, I.getDbgRecordRange(), VMap,
1334 RemapInstruction(&I, VMap,
1336 if (auto *II = dyn_cast<AssumeInst>(&I))
1338 }
1339
1340 // Update any PHI nodes in the cloned successors of the skipped blocks to not
1341 // have spurious incoming values.
1342 for (auto *LoopBB : L.blocks())
1343 if (SkipBlock(LoopBB))
1344 for (auto *SuccBB : successors(LoopBB))
1345 if (auto *ClonedSuccBB = cast_or_null<BasicBlock>(VMap.lookup(SuccBB)))
1346 for (PHINode &PN : ClonedSuccBB->phis())
1347 PN.removeIncomingValue(LoopBB, /*DeletePHIIfEmpty*/ false);
1348
1349 // Remove the cloned parent as a predecessor of any successor we ended up
1350 // cloning other than the unswitched one.
1351 auto *ClonedParentBB = cast<BasicBlock>(VMap.lookup(ParentBB));
1352 for (auto *SuccBB : successors(ParentBB)) {
1353 if (SuccBB == UnswitchedSuccBB)
1354 continue;
1355
1356 auto *ClonedSuccBB = cast_or_null<BasicBlock>(VMap.lookup(SuccBB));
1357 if (!ClonedSuccBB)
1358 continue;
1359
1360 ClonedSuccBB->removePredecessor(ClonedParentBB,
1361 /*KeepOneInputPHIs*/ true);
1362 }
1363
1364 // Replace the cloned branch with an unconditional branch to the cloned
1365 // unswitched successor.
1366 auto *ClonedSuccBB = cast<BasicBlock>(VMap.lookup(UnswitchedSuccBB));
1367 Instruction *ClonedTerminator = ClonedParentBB->getTerminator();
1368 // Trivial Simplification. If Terminator is a conditional branch and
1369 // condition becomes dead - erase it.
1370 Value *ClonedConditionToErase = nullptr;
1371 if (auto *BI = dyn_cast<CondBrInst>(ClonedTerminator))
1372 ClonedConditionToErase = BI->getCondition();
1373 else if (auto *SI = dyn_cast<SwitchInst>(ClonedTerminator))
1374 ClonedConditionToErase = SI->getCondition();
1375
1376 Instruction *BI = UncondBrInst::Create(ClonedSuccBB, ClonedParentBB);
1377 BI->setDebugLoc(ClonedTerminator->getDebugLoc());
1378 ClonedTerminator->eraseFromParent();
1379
1380 if (ClonedConditionToErase)
1381 RecursivelyDeleteTriviallyDeadInstructions(ClonedConditionToErase, nullptr,
1382 MSSAU);
1383
1384 // If there are duplicate entries in the PHI nodes because of multiple edges
1385 // to the unswitched successor, we need to nuke all but one as we replaced it
1386 // with a direct branch.
1387 for (PHINode &PN : ClonedSuccBB->phis()) {
1388 bool Found = false;
1389 // Loop over the incoming operands backwards so we can easily delete as we
1390 // go without invalidating the index.
1391 for (int i = PN.getNumOperands() - 1; i >= 0; --i) {
1392 if (PN.getIncomingBlock(i) != ClonedParentBB)
1393 continue;
1394 if (!Found) {
1395 Found = true;
1396 continue;
1397 }
1398 PN.removeIncomingValue(i, /*DeletePHIIfEmpty*/ false);
1399 }
1400 }
1401
1402 // Record the domtree updates for the new blocks.
// NOTE(review): `DTUpdates` and `SuccSet` are declared on a line not shown
// in this view; SuccSet deduplicates successor edges per cloned block so the
// update list contains no duplicates.
1404 for (auto *ClonedBB : NewBlocks) {
1405 for (auto *SuccBB : successors(ClonedBB))
1406 if (SuccSet.insert(SuccBB).second)
1407 DTUpdates.push_back({DominatorTree::Insert, ClonedBB, SuccBB});
1408 SuccSet.clear();
1409 }
1410
1411 return ClonedPH;
1412}
1413
1414/// Recursively clone the specified loop and all of its children.
1415///
1416/// The target parent loop for the clone should be provided, or can be null if
1417/// the clone is a top-level loop. While cloning, all the blocks are mapped
1418/// with the provided value map. The entire original loop must be present in
1419/// the value map. The cloned loop is returned.
1420static Loop *cloneLoopNest(Loop &OrigRootL, Loop *RootParentL,
1421 const ValueToValueMapTy &VMap, LoopInfo &LI) {
1422 auto AddClonedBlocksToLoop = [&](Loop &OrigL, Loop &ClonedL) {
1423 assert(ClonedL.getBlocks().empty() && "Must start with an empty loop!");
1424 ClonedL.reserveBlocks(OrigL.getNumBlocks());
1425 for (auto *BB : OrigL.blocks()) {
1426 auto *ClonedBB = cast<BasicBlock>(VMap.lookup(BB));
1427 ClonedL.addBlockEntry(ClonedBB);
1428 if (LI.getLoopFor(BB) == &OrigL)
1429 LI.changeLoopFor(ClonedBB, &ClonedL);
1430 }
1431 };
1432
1433 // We specially handle the first loop because it may get cloned into
1434 // a different parent and because we most commonly are cloning leaf loops.
1435 Loop *ClonedRootL = LI.AllocateLoop();
1436 if (RootParentL)
1437 RootParentL->addChildLoop(ClonedRootL);
1438 else
1439 LI.addTopLevelLoop(ClonedRootL);
1440 AddClonedBlocksToLoop(OrigRootL, *ClonedRootL);
1441
1442 if (OrigRootL.isInnermost())
1443 return ClonedRootL;
1444
1445 // If we have a nest, we can quickly clone the entire loop nest using an
1446 // iterative approach because it is a tree. We keep the cloned parent in the
1447 // data structure to avoid repeatedly querying through a map to find it.
1448 SmallVector<std::pair<Loop *, Loop *>, 16> LoopsToClone;
1449 // Build up the loops to clone in reverse order as we'll clone them from the
1450 // back.
1451 for (Loop *ChildL : llvm::reverse(OrigRootL))
1452 LoopsToClone.push_back({ClonedRootL, ChildL});
1453 do {
1454 Loop *ClonedParentL, *L;
1455 std::tie(ClonedParentL, L) = LoopsToClone.pop_back_val();
1456 Loop *ClonedL = LI.AllocateLoop();
1457 ClonedParentL->addChildLoop(ClonedL);
1458 AddClonedBlocksToLoop(*L, *ClonedL);
1459 for (Loop *ChildL : llvm::reverse(*L))
1460 LoopsToClone.push_back({ClonedL, ChildL});
1461 } while (!LoopsToClone.empty());
1462
1463 return ClonedRootL;
1464}
1465
1466/// Build the cloned loops of an original loop from unswitching.
1467///
1468/// Because unswitching simplifies the CFG of the loop, this isn't a trivial
1469/// operation. We need to re-verify that there even is a loop (as the backedge
1470/// may not have been cloned), and even if there are remaining backedges the
1471/// backedge set may be different. However, we know that each child loop is
1472/// undisturbed, we only need to find where to place each child loop within
1473/// either any parent loop or within a cloned version of the original loop.
1474///
1475/// Because child loops may end up cloned outside of any cloned version of the
1476/// original loop, multiple cloned sibling loops may be created. All of them
1477/// are returned so that the newly introduced loop nest roots can be
1478/// identified.
1479static void buildClonedLoops(Loop &OrigL, ArrayRef<BasicBlock *> ExitBlocks,
1480 const ValueToValueMapTy &VMap, LoopInfo &LI,
1481 SmallVectorImpl<Loop *> &NonChildClonedLoops) {
1482 Loop *ClonedL = nullptr;
1483
1484 auto *OrigPH = OrigL.getLoopPreheader();
1485 auto *OrigHeader = OrigL.getHeader();
1486
1487 auto *ClonedPH = cast<BasicBlock>(VMap.lookup(OrigPH));
1488 auto *ClonedHeader = cast<BasicBlock>(VMap.lookup(OrigHeader));
1489
1490 // We need to know the loops of the cloned exit blocks to even compute the
1491 // accurate parent loop. If we only clone exits to some parent of the
1492 // original parent, we want to clone into that outer loop. We also keep track
1493 // of the loops that our cloned exit blocks participate in.
1494 Loop *ParentL = nullptr;
1495 SmallVector<BasicBlock *, 4> ClonedExitsInLoops;
1497 ClonedExitsInLoops.reserve(ExitBlocks.size());
1498 for (auto *ExitBB : ExitBlocks)
1499 if (auto *ClonedExitBB = cast_or_null<BasicBlock>(VMap.lookup(ExitBB)))
1500 if (Loop *ExitL = LI.getLoopFor(ExitBB)) {
1501 ExitLoopMap[ClonedExitBB] = ExitL;
1502 ClonedExitsInLoops.push_back(ClonedExitBB);
1503 if (!ParentL || (ParentL != ExitL && ParentL->contains(ExitL)))
1504 ParentL = ExitL;
1505 }
1506 assert((!ParentL || ParentL == OrigL.getParentLoop() ||
1507 ParentL->contains(OrigL.getParentLoop())) &&
1508 "The computed parent loop should always contain (or be) the parent of "
1509 "the original loop.");
1510
1511 // We build the set of blocks dominated by the cloned header from the set of
1512 // cloned blocks out of the original loop. While not all of these will
1513 // necessarily be in the cloned loop, it is enough to establish that they
1514 // aren't in unreachable cycles, etc.
1515 SmallSetVector<BasicBlock *, 16> ClonedLoopBlocks;
1516 for (auto *BB : OrigL.blocks())
1517 if (auto *ClonedBB = cast_or_null<BasicBlock>(VMap.lookup(BB)))
1518 ClonedLoopBlocks.insert(ClonedBB);
1519
1520 // Rebuild the set of blocks that will end up in the cloned loop. We may have
1521 // skipped cloning some region of this loop which can in turn skip some of
1522 // the backedges so we have to rebuild the blocks in the loop based on the
1523 // backedges that remain after cloning.
1525 SmallPtrSet<BasicBlock *, 16> BlocksInClonedLoop;
1526 for (auto *Pred : predecessors(ClonedHeader)) {
1527 // The only possible non-loop header predecessor is the preheader because
1528 // we know we cloned the loop in simplified form.
1529 if (Pred == ClonedPH)
1530 continue;
1531
1532 // Because the loop was in simplified form, the only non-loop predecessor
1533 // should be the preheader.
1534 assert(ClonedLoopBlocks.count(Pred) && "Found a predecessor of the loop "
1535 "header other than the preheader "
1536 "that is not part of the loop!");
1537
1538 // Insert this block into the loop set and on the first visit (and if it
1539 // isn't the header we're currently walking) put it into the worklist to
1540 // recurse through.
1541 if (BlocksInClonedLoop.insert(Pred).second && Pred != ClonedHeader)
1542 Worklist.push_back(Pred);
1543 }
1544
1545 // If we had any backedges then there *is* a cloned loop. Put the header into
1546 // the loop set and then walk the worklist backwards to find all the blocks
1547 // that remain within the loop after cloning.
1548 if (!BlocksInClonedLoop.empty()) {
1549 BlocksInClonedLoop.insert(ClonedHeader);
1550
1551 while (!Worklist.empty()) {
1552 BasicBlock *BB = Worklist.pop_back_val();
1553 assert(BlocksInClonedLoop.count(BB) &&
1554 "Didn't put block into the loop set!");
1555
1556 // Insert any predecessors that are in the possible set into the cloned
1557 // set, and if the insert is successful, add them to the worklist. Note
1558 // that we filter on the blocks that are definitely reachable via the
1559 // backedge to the loop header so we may prune out dead code within the
1560 // cloned loop.
1561 for (auto *Pred : predecessors(BB))
1562 if (ClonedLoopBlocks.count(Pred) &&
1563 BlocksInClonedLoop.insert(Pred).second)
1564 Worklist.push_back(Pred);
1565 }
1566
1567 ClonedL = LI.AllocateLoop();
1568 if (ParentL) {
1569 ParentL->addBasicBlockToLoop(ClonedPH, LI);
1570 ParentL->addChildLoop(ClonedL);
1571 } else {
1572 LI.addTopLevelLoop(ClonedL);
1573 }
1574 NonChildClonedLoops.push_back(ClonedL);
1575
1576 ClonedL->reserveBlocks(BlocksInClonedLoop.size());
1577 // We don't want to just add the cloned loop blocks based on how we
1578 // discovered them. The original order of blocks was carefully built in
1579 // a way that doesn't rely on predecessor ordering. Rather than re-invent
1580 // that logic, we just re-walk the original blocks (and those of the child
1581 // loops) and filter them as we add them into the cloned loop.
1582 for (auto *BB : OrigL.blocks()) {
1583 auto *ClonedBB = cast_or_null<BasicBlock>(VMap.lookup(BB));
1584 if (!ClonedBB || !BlocksInClonedLoop.count(ClonedBB))
1585 continue;
1586
1587 // Directly add the blocks that are only in this loop.
1588 if (LI.getLoopFor(BB) == &OrigL) {
1589 ClonedL->addBasicBlockToLoop(ClonedBB, LI);
1590 continue;
1591 }
1592
1593 // We want to manually add it to this loop and parents.
1594 // Registering it with LoopInfo will happen when we clone the top
1595 // loop for this block.
1596 for (Loop *PL = ClonedL; PL; PL = PL->getParentLoop())
1597 PL->addBlockEntry(ClonedBB);
1598 }
1599
1600 // Now add each child loop whose header remains within the cloned loop. All
1601 // of the blocks within the loop must satisfy the same constraints as the
1602 // header so once we pass the header checks we can just clone the entire
1603 // child loop nest.
1604 for (Loop *ChildL : OrigL) {
1605 auto *ClonedChildHeader =
1606 cast_or_null<BasicBlock>(VMap.lookup(ChildL->getHeader()));
1607 if (!ClonedChildHeader || !BlocksInClonedLoop.count(ClonedChildHeader))
1608 continue;
1609
1610#ifndef NDEBUG
1611 // We should never have a cloned child loop header but fail to have
1612 // all of the blocks for that child loop.
1613 for (auto *ChildLoopBB : ChildL->blocks())
1614 assert(BlocksInClonedLoop.count(
1615 cast<BasicBlock>(VMap.lookup(ChildLoopBB))) &&
1616 "Child cloned loop has a header within the cloned outer "
1617 "loop but not all of its blocks!");
1618#endif
1619
1620 cloneLoopNest(*ChildL, ClonedL, VMap, LI);
1621 }
1622 }
1623
1624 // Now that we've handled all the components of the original loop that were
1625 // cloned into a new loop, we still need to handle anything from the original
1626 // loop that wasn't in a cloned loop.
1627
1628 // Figure out what blocks are left to place within any loop nest containing
1629 // the unswitched loop. If we never formed a loop, the cloned PH is one of
1630 // them.
1631 SmallPtrSet<BasicBlock *, 16> UnloopedBlockSet;
1632 if (BlocksInClonedLoop.empty())
1633 UnloopedBlockSet.insert(ClonedPH);
1634 for (auto *ClonedBB : ClonedLoopBlocks)
1635 if (!BlocksInClonedLoop.count(ClonedBB))
1636 UnloopedBlockSet.insert(ClonedBB);
1637
1638 // Copy the cloned exits and sort them in ascending loop depth, we'll work
1639 // backwards across these to process them inside out. The order shouldn't
1640 // matter as we're just trying to build up the map from inside-out; we use
1641 // the map in a more stably ordered way below.
1642 auto OrderedClonedExitsInLoops = ClonedExitsInLoops;
1643 llvm::sort(OrderedClonedExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) {
1644 return ExitLoopMap.lookup(LHS)->getLoopDepth() <
1645 ExitLoopMap.lookup(RHS)->getLoopDepth();
1646 });
1647
1648 // Populate the existing ExitLoopMap with everything reachable from each
1649 // exit, starting from the inner most exit.
1650 while (!UnloopedBlockSet.empty() && !OrderedClonedExitsInLoops.empty()) {
1651 assert(Worklist.empty() && "Didn't clear worklist!");
1652
1653 BasicBlock *ExitBB = OrderedClonedExitsInLoops.pop_back_val();
1654 Loop *ExitL = ExitLoopMap.lookup(ExitBB);
1655
1656 // Walk the CFG back until we hit the cloned PH adding everything reachable
1657 // and in the unlooped set to this exit block's loop.
1658 Worklist.push_back(ExitBB);
1659 do {
1660 BasicBlock *BB = Worklist.pop_back_val();
1661 // We can stop recursing at the cloned preheader (if we get there).
1662 if (BB == ClonedPH)
1663 continue;
1664
1665 for (BasicBlock *PredBB : predecessors(BB)) {
1666 // If this pred has already been moved to our set or is part of some
1667 // (inner) loop, no update needed.
1668 if (!UnloopedBlockSet.erase(PredBB)) {
1669 assert(
1670 (BlocksInClonedLoop.count(PredBB) || ExitLoopMap.count(PredBB)) &&
1671 "Predecessor not mapped to a loop!");
1672 continue;
1673 }
1674
1675 // We just insert into the loop set here. We'll add these blocks to the
1676 // exit loop after we build up the set in an order that doesn't rely on
1677 // predecessor order (which in turn relies on use list order).
1678 bool Inserted = ExitLoopMap.insert({PredBB, ExitL}).second;
1679 (void)Inserted;
1680 assert(Inserted && "Should only visit an unlooped block once!");
1681
1682 // And recurse through to its predecessors.
1683 Worklist.push_back(PredBB);
1684 }
1685 } while (!Worklist.empty());
1686 }
1687
1688 // Now that the ExitLoopMap gives as mapping for all the non-looping cloned
1689 // blocks to their outer loops, walk the cloned blocks and the cloned exits
1690 // in their original order adding them to the correct loop.
1691
1692 // We need a stable insertion order. We use the order of the original loop
1693 // order and map into the correct parent loop.
1694 for (auto *BB : llvm::concat<BasicBlock *const>(
1695 ArrayRef(ClonedPH), ClonedLoopBlocks, ClonedExitsInLoops))
1696 if (Loop *OuterL = ExitLoopMap.lookup(BB))
1697 OuterL->addBasicBlockToLoop(BB, LI);
1698
1699#ifndef NDEBUG
1700 for (auto &BBAndL : ExitLoopMap) {
1701 auto *BB = BBAndL.first;
1702 auto *OuterL = BBAndL.second;
1703 assert(LI.getLoopFor(BB) == OuterL &&
1704 "Failed to put all blocks into outer loops!");
1705 }
1706#endif
1707
1708 // Now that all the blocks are placed into the correct containing loop in the
1709 // absence of child loops, find all the potentially cloned child loops and
1710 // clone them into whatever outer loop we placed their header into.
1711 for (Loop *ChildL : OrigL) {
1712 auto *ClonedChildHeader =
1713 cast_or_null<BasicBlock>(VMap.lookup(ChildL->getHeader()));
1714 if (!ClonedChildHeader || BlocksInClonedLoop.count(ClonedChildHeader))
1715 continue;
1716
1717#ifndef NDEBUG
1718 for (auto *ChildLoopBB : ChildL->blocks())
1719 assert(VMap.count(ChildLoopBB) &&
1720 "Cloned a child loop header but not all of that loops blocks!");
1721#endif
1722
1723 NonChildClonedLoops.push_back(cloneLoopNest(
1724 *ChildL, ExitLoopMap.lookup(ClonedChildHeader), VMap, LI));
1725 }
1726}
1727
// Delete cloned basic blocks that ended up unreachable after unswitching.
// For every original loop/exit block we look up its clone in each
// per-unswitched-successor value map; clones the dominator tree reports as
// unreachable from entry are unhooked from their successors' PHI nodes,
// purged from MemorySSA, stripped of references (to break cyclic uses), and
// finally erased from the IR.
// NOTE(review): this extraction is missing orig. line 1729 (the declaration
// line carrying the function name and leading parameters) and orig. 1733
// (presumably the local `DeadBlocks` vector declaration used below) — the
// embedded numbering skips them; confirm against the upstream source.
1728static void
1730                       ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps,
1731                       DominatorTree &DT, MemorySSAUpdater *MSSAU) {
1732  // Find all the dead clones, and remove them from their successors.
1734  for (BasicBlock *BB : llvm::concat<BasicBlock *const>(L.blocks(), ExitBlocks))
1735    for (const auto &VMap : VMaps)
1736      if (BasicBlock *ClonedBB = cast_or_null<BasicBlock>(VMap->lookup(BB)))
1737        if (!DT.isReachableFromEntry(ClonedBB)) {
          // Unhook the dead clone from its successors' PHIs before queuing
          // it for deletion.
1738          for (BasicBlock *SuccBB : successors(ClonedBB))
1739            SuccBB->removePredecessor(ClonedBB);
1740          DeadBlocks.push_back(ClonedBB);
1741        }
1742
1743  // Remove all MemorySSA in the dead blocks
1744  if (MSSAU) {
1745    SmallSetVector<BasicBlock *, 8> DeadBlockSet(DeadBlocks.begin(),
1746                                                 DeadBlocks.end());
1747    MSSAU->removeBlocks(DeadBlockSet);
1748  }
1749
1750  // Drop any remaining references to break cycles.
1751  for (BasicBlock *BB : DeadBlocks)
1752    BB->dropAllReferences();
1753  // Erase them from the IR.
1754  for (BasicBlock *BB : DeadBlocks)
1755    BB->eraseFromParent();
1756}
1757
// Remove every block of this loop nest that became unreachable after
// unswitching: unhook the dead blocks from successors' PHIs and from
// MemorySSA, filter them out of `ExitBlocks` for the caller, erase them from
// this loop and all parent loops, delete child loops whose header died,
// clear LoopInfo mappings, replace all remaining uses with poison, and
// finally erase the blocks from the IR.
// NOTE(review): this extraction is missing orig. lines 1758-1759 (the
// declaration line with the function name and the leading `Loop &L` /
// `ExitBlocks` parameters) and orig. 1766 (presumably the `DeadBlockSet`
// declaration used throughout); confirm against the upstream source.
1760                                     DominatorTree &DT, LoopInfo &LI,
1761                                     MemorySSAUpdater *MSSAU,
1762                                     ScalarEvolution *SE,
1763                                     LPMUpdater &LoopUpdater) {
1764  // Find all the dead blocks tied to this loop, and remove them from their
1765  // successors.
1767
1768  // Start with loop/exit blocks and get a transitive closure of reachable dead
1769  // blocks.
1770  SmallVector<BasicBlock *, 16> DeathCandidates(ExitBlocks.begin(),
1771                                                ExitBlocks.end());
1772  DeathCandidates.append(L.blocks().begin(), L.blocks().end());
1773  while (!DeathCandidates.empty()) {
1774    auto *BB = DeathCandidates.pop_back_val();
1775    if (!DeadBlockSet.count(BB) && !DT.isReachableFromEntry(BB)) {
      // A dead block may make its successors dead too, so re-queue them.
1776      for (BasicBlock *SuccBB : successors(BB)) {
1777        SuccBB->removePredecessor(BB);
1778        DeathCandidates.push_back(SuccBB);
1779      }
1780      DeadBlockSet.insert(BB);
1781    }
1782  }
1783
1784  // Remove all MemorySSA in the dead blocks
1785  if (MSSAU)
1786    MSSAU->removeBlocks(DeadBlockSet);
1787
1788  // Filter out the dead blocks from the exit blocks list so that it can be
1789  // used in the caller.
1790  llvm::erase_if(ExitBlocks,
1791                 [&](BasicBlock *BB) { return DeadBlockSet.count(BB); });
1792
1793  // Walk from this loop up through its parents removing all of the dead blocks.
1794  for (Loop *ParentL = &L; ParentL; ParentL = ParentL->getParentLoop()) {
1795    for (auto *BB : DeadBlockSet)
1796      ParentL->getBlocksSet().erase(BB);
1797    llvm::erase_if(ParentL->getBlocksVector(),
1798                   [&](BasicBlock *BB) { return DeadBlockSet.count(BB); });
1799  }
1800
1801  // Now delete the dead child loops. This raw delete will clear them
1802  // recursively.
1803  llvm::erase_if(L.getSubLoopsVector(), [&](Loop *ChildL) {
1804    if (!DeadBlockSet.count(ChildL->getHeader()))
1805      return false;
1806
1807    assert(llvm::all_of(ChildL->blocks(),
1808                        [&](BasicBlock *ChildBB) {
1809                          return DeadBlockSet.count(ChildBB);
1810                        }) &&
1811           "If the child loop header is dead all blocks in the child loop must "
1812           "be dead as well!");
1813    LoopUpdater.markLoopAsDeleted(*ChildL, ChildL->getName());
1814    if (SE)
    // NOTE(review): the then-statement of this `if (SE)` (orig. 1815) is
    // missing from the extraction — as written here, the guard would wrongly
    // apply to LI.destroy(ChildL). It is presumably a ScalarEvolution
    // invalidation call; confirm against the upstream source.
1816    LI.destroy(ChildL);
1817    return true;
1818  });
1819
1820  // Remove the loop mappings for the dead blocks and drop all the references
1821  // from these blocks to others to handle cyclic references as we start
1822  // deleting the blocks themselves.
1823  for (auto *BB : DeadBlockSet) {
1824    // Check that the dominator tree has already been updated.
1825    assert(!DT.getNode(BB) && "Should already have cleared domtree!");
1826    LI.changeLoopFor(BB, nullptr);
1827    // Drop all uses of the instructions to make sure we won't have dangling
1828    // uses in other blocks.
1829    for (auto &I : *BB)
1830      if (!I.use_empty())
1831        I.replaceAllUsesWith(PoisonValue::get(I.getType()));
1832    BB->dropAllReferences();
1833  }
1834
1835  // Actually delete the blocks now that they've been fully unhooked from the
1836  // IR.
1837  for (auto *BB : DeadBlockSet)
1838    BB->eraseFromParent();
1839}
1840
1841/// Recompute the set of blocks in a loop after unswitching.
1842///
1843/// This walks from the original headers predecessors to rebuild the loop. We
1844/// take advantage of the fact that new blocks can't have been added, and so we
1845/// filter by the original loop's blocks. This also handles potentially
1846/// unreachable code that we don't want to explore but might be found examining
1847/// the predecessors of the header.
1848///
1849/// If the original loop is no longer a loop, this will return an empty set. If
1850/// it remains a loop, all the blocks within it will be added to the set
1851/// (including those blocks in inner loops).
// NOTE(review): this extraction is missing orig. line 1852 (the declaration
// line with the return type, function name, and `Loop &L` parameter), orig.
// 1854 (presumably the returned `LoopBlockSet` declaration), and orig. 1860
// (presumably the `Worklist` vector declaration); confirm upstream.
1853                                                  LoopInfo &LI) {
1855
1856  auto *PH = L.getLoopPreheader();
1857  auto *Header = L.getHeader();
1858
1859  // A worklist to use while walking backwards from the header.
1861
1862  // First walk the predecessors of the header to find the backedges. This will
1863  // form the basis of our walk.
1864  for (auto *Pred : predecessors(Header)) {
1865    // Skip the preheader.
1866    if (Pred == PH)
1867      continue;
1868
1869    // Because the loop was in simplified form, the only non-loop predecessor
1870    // is the preheader.
1871    assert(L.contains(Pred) && "Found a predecessor of the loop header other "
1872                               "than the preheader that is not part of the "
1873                               "loop!");
1874
1875    // Insert this block into the loop set and on the first visit and, if it
1876    // isn't the header we're currently walking, put it into the worklist to
1877    // recurse through.
1878    if (LoopBlockSet.insert(Pred).second && Pred != Header)
1879      Worklist.push_back(Pred);
1880  }
1881
1882  // If no backedges were found, we're done.
1883  if (LoopBlockSet.empty())
1884    return LoopBlockSet;
1885
1886  // We found backedges, recurse through them to identify the loop blocks.
1887  while (!Worklist.empty()) {
1888    BasicBlock *BB = Worklist.pop_back_val();
1889    assert(LoopBlockSet.count(BB) && "Didn't put block into the loop set!");
1890
1891    // No need to walk past the header.
1892    if (BB == Header)
1893      continue;
1894
1895    // Because we know the inner loop structure remains valid we can use the
1896    // loop structure to jump immediately across the entire nested loop.
1897    // Further, because it is in loop simplified form, we can directly jump
1898    // to its preheader afterward.
1899    if (Loop *InnerL = LI.getLoopFor(BB))
1900      if (InnerL != &L) {
1901        assert(L.contains(InnerL) &&
1902               "Should not reach a loop *outside* this loop!");
1903        // The preheader is the only possible predecessor of the loop so
1904        // insert it into the set and check whether it was already handled.
1905        auto *InnerPH = InnerL->getLoopPreheader();
1906        assert(L.contains(InnerPH) && "Cannot contain an inner loop block "
1907                                      "but not contain the inner loop "
1908                                      "preheader!");
1909        if (!LoopBlockSet.insert(InnerPH).second)
1910          // The only way to reach the preheader is through the loop body
1911          // itself so if it has been visited the loop is already handled.
1912          continue;
1913
1914        // Insert all of the blocks (other than those already present) into
1915        // the loop set. We expect at least the block that led us to find the
1916        // inner loop to be in the block set, but we may also have other loop
1917        // blocks if they were already enqueued as predecessors of some other
1918        // outer loop block.
1919        for (auto *InnerBB : InnerL->blocks()) {
1920          if (InnerBB == BB) {
1921            assert(LoopBlockSet.count(InnerBB) &&
1922                   "Block should already be in the set!");
1923            continue;
1924          }
1925
1926          LoopBlockSet.insert(InnerBB);
1927        }
1928
1929        // Add the preheader to the worklist so we will continue past the
1930        // loop body.
1931        Worklist.push_back(InnerPH);
1932        continue;
1933      }
1934
1935    // Insert any predecessors that were in the original loop into the new
1936    // set, and if the insert is successful, add them to the worklist.
1937    for (auto *Pred : predecessors(BB))
1938      if (L.contains(Pred) && LoopBlockSet.insert(Pred).second)
1939        Worklist.push_back(Pred);
1940  }
1941
1942  assert(LoopBlockSet.count(Header) && "Cannot fail to add the header!");
1943
1944  // We've found all the blocks participating in the loop, return our completed
1945  // set.
1946  return LoopBlockSet;
1947}
1948
1949/// Rebuild a loop after unswitching removes some subset of blocks and edges.
1950///
1951/// The removal may have removed some child loops entirely but cannot have
1952/// disturbed any remaining child loops. However, they may need to be hoisted
1953/// to the parent loop (or to be top-level loops). The original loop may be
1954/// completely removed.
1955///
1956/// The sibling loops resulting from this update are returned. If the original
1957/// loop remains a valid loop, it will be the first entry in this list with all
1958/// of the newly sibling loops following it.
1959///
1960/// Returns true if the loop remains a loop after unswitching, and false if it
1961/// is no longer a loop after unswitching (and should not continue to be
1962/// referenced).
// NOTE(review): this extraction is missing orig. line 1963 (the declaration
// line with the function name and the leading `Loop &L` / `ExitBlocks`
// parameters) and orig. 2049 (presumably the `Worklist` declaration used by
// the while-loop below); confirm against the upstream source.
1964                                     LoopInfo &LI,
1965                                     SmallVectorImpl<Loop *> &HoistedLoops,
1966                                     ScalarEvolution *SE) {
1967  auto *PH = L.getLoopPreheader();
1968
1969  // Compute the actual parent loop from the exit blocks. Because we may have
1970  // pruned some exits the loop may be different from the original parent.
1971  Loop *ParentL = nullptr;
1972  SmallVector<Loop *, 4> ExitLoops;
1973  SmallVector<BasicBlock *, 4> ExitsInLoops;
1974  ExitsInLoops.reserve(ExitBlocks.size());
1975  for (auto *ExitBB : ExitBlocks)
1976    if (Loop *ExitL = LI.getLoopFor(ExitBB)) {
1977      ExitLoops.push_back(ExitL);
1978      ExitsInLoops.push_back(ExitBB);
      // Track the outermost loop containing an exit; that bounds how far up
      // the nest this loop can end up.
1979      if (!ParentL || (ParentL != ExitL && ParentL->contains(ExitL)))
1980        ParentL = ExitL;
1981    }
1982
1983  // Recompute the blocks participating in this loop. This may be empty if it
1984  // is no longer a loop.
1985  auto LoopBlockSet = recomputeLoopBlockSet(L, LI);
1986
1987  // If we still have a loop, we need to re-set the loop's parent as the exit
1988  // block set changing may have moved it within the loop nest. Note that this
1989  // can only happen when this loop has a parent as it can only hoist the loop
1990  // *up* the nest.
1991  if (!LoopBlockSet.empty() && L.getParentLoop() != ParentL) {
1992    // Remove this loop's (original) blocks from all of the intervening loops.
1993    for (Loop *IL = L.getParentLoop(); IL != ParentL;
1994         IL = IL->getParentLoop()) {
1995      IL->getBlocksSet().erase(PH);
1996      for (auto *BB : L.blocks())
1997        IL->getBlocksSet().erase(BB);
1998      llvm::erase_if(IL->getBlocksVector(), [&](BasicBlock *BB) {
1999        return BB == PH || L.contains(BB);
2000      });
2001    }
2002
2003    LI.changeLoopFor(PH, ParentL);
2004    L.getParentLoop()->removeChildLoop(&L);
2005    if (ParentL)
2006      ParentL->addChildLoop(&L);
2007    else
2008      LI.addTopLevelLoop(&L);
2009  }
2010
2011  // Now we update all the blocks which are no longer within the loop.
2012  auto &Blocks = L.getBlocksVector();
2013  auto BlocksSplitI =
2014      LoopBlockSet.empty()
2015          ? Blocks.begin()
2016          : std::stable_partition(
2017                Blocks.begin(), Blocks.end(),
2018                [&](BasicBlock *BB) { return LoopBlockSet.count(BB); });
2019
2020  // Before we erase the list of unlooped blocks, build a set of them.
2021  SmallPtrSet<BasicBlock *, 16> UnloopedBlocks(BlocksSplitI, Blocks.end());
2022  if (LoopBlockSet.empty())
2023    UnloopedBlocks.insert(PH);
2024
2025  // Now erase these blocks from the loop.
2026  for (auto *BB : make_range(BlocksSplitI, Blocks.end()))
2027    L.getBlocksSet().erase(BB);
2028  Blocks.erase(BlocksSplitI, Blocks.end());
2029
2030  // Sort the exits in ascending loop depth, we'll work backwards across these
2031  // to process them inside out.
2032  llvm::stable_sort(ExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) {
2033    return LI.getLoopDepth(LHS) < LI.getLoopDepth(RHS);
2034  });
2035
2036  // We'll build up a set for each exit loop.
2037  SmallPtrSet<BasicBlock *, 16> NewExitLoopBlocks;
2038  Loop *PrevExitL = L.getParentLoop(); // The deepest possible exit loop.
2039
2040  auto RemoveUnloopedBlocksFromLoop =
2041      [](Loop &L, SmallPtrSetImpl<BasicBlock *> &UnloopedBlocks) {
2042        for (auto *BB : UnloopedBlocks)
2043          L.getBlocksSet().erase(BB);
2044        llvm::erase_if(L.getBlocksVector(), [&](BasicBlock *BB) {
2045          return UnloopedBlocks.count(BB);
2046        });
2047      };
2048
2050  while (!UnloopedBlocks.empty() && !ExitsInLoops.empty()) {
2051    assert(Worklist.empty() && "Didn't clear worklist!");
2052    assert(NewExitLoopBlocks.empty() && "Didn't clear loop set!");
2053
2054    // Grab the next exit block, in decreasing loop depth order.
2055    BasicBlock *ExitBB = ExitsInLoops.pop_back_val();
2056    Loop &ExitL = *LI.getLoopFor(ExitBB);
2057    assert(ExitL.contains(&L) && "Exit loop must contain the inner loop!");
2058
2059    // Erase all of the unlooped blocks from the loops between the previous
2060    // exit loop and this exit loop. This works because the ExitInLoops list is
2061    // sorted in increasing order of loop depth and thus we visit loops in
2062    // decreasing order of loop depth.
2063    for (; PrevExitL != &ExitL; PrevExitL = PrevExitL->getParentLoop())
2064      RemoveUnloopedBlocksFromLoop(*PrevExitL, UnloopedBlocks);
2065
2066    // Walk the CFG back until we hit the cloned PH adding everything reachable
2067    // and in the unlooped set to this exit block's loop.
2068    Worklist.push_back(ExitBB);
2069    do {
2070      BasicBlock *BB = Worklist.pop_back_val();
2071      // We can stop recursing at the cloned preheader (if we get there).
2072      if (BB == PH)
2073        continue;
2074
2075      for (BasicBlock *PredBB : predecessors(BB)) {
2076        // If this pred has already been moved to our set or is part of some
2077        // (inner) loop, no update needed.
2078        if (!UnloopedBlocks.erase(PredBB)) {
2079          assert((NewExitLoopBlocks.count(PredBB) ||
2080                  ExitL.contains(LI.getLoopFor(PredBB))) &&
2081                 "Predecessor not in a nested loop (or already visited)!");
2082          continue;
2083        }
2084
2085        // We just insert into the loop set here. We'll add these blocks to the
2086        // exit loop after we build up the set in a deterministic order rather
2087        // than the predecessor-influenced visit order.
2088        bool Inserted = NewExitLoopBlocks.insert(PredBB).second;
2089        (void)Inserted;
2090        assert(Inserted && "Should only visit an unlooped block once!");
2091
2092        // And recurse through to its predecessors.
2093        Worklist.push_back(PredBB);
2094      }
2095    } while (!Worklist.empty());
2096
2097    // If blocks in this exit loop were directly part of the original loop (as
2098    // opposed to a child loop) update the map to point to this exit loop. This
2099    // just updates a map and so the fact that the order is unstable is fine.
2100    for (auto *BB : NewExitLoopBlocks)
2101      if (Loop *BBL = LI.getLoopFor(BB))
2102        if (BBL == &L || !L.contains(BBL))
2103          LI.changeLoopFor(BB, &ExitL);
2104
2105    // We will remove the remaining unlooped blocks from this loop in the next
2106    // iteration or below.
2107    NewExitLoopBlocks.clear();
2108  }
2109
2110  // Any remaining unlooped blocks are no longer part of any loop unless they
2111  // are part of some child loop.
2112  for (; PrevExitL; PrevExitL = PrevExitL->getParentLoop())
2113    RemoveUnloopedBlocksFromLoop(*PrevExitL, UnloopedBlocks);
2114  for (auto *BB : UnloopedBlocks)
2115    if (Loop *BBL = LI.getLoopFor(BB))
2116      if (BBL == &L || !L.contains(BBL))
2117        LI.changeLoopFor(BB, nullptr);
2118
2119  // Sink all the child loops whose headers are no longer in the loop set to
2120  // the parent (or to be top level loops). We reach into the loop and directly
2121  // update its subloop vector to make this batch update efficient.
2122  auto &SubLoops = L.getSubLoopsVector();
2123  auto SubLoopsSplitI =
2124      LoopBlockSet.empty()
2125          ? SubLoops.begin()
2126          : std::stable_partition(
2127                SubLoops.begin(), SubLoops.end(), [&](Loop *SubL) {
2128                  return LoopBlockSet.count(SubL->getHeader());
2129                });
2130  for (auto *HoistedL : make_range(SubLoopsSplitI, SubLoops.end())) {
2131    HoistedLoops.push_back(HoistedL);
2132    HoistedL->setParentLoop(nullptr);
2133
2134    // To compute the new parent of this hoisted loop we look at where we
2135    // placed the preheader above. We can't lookup the header itself because we
2136    // retained the mapping from the header to the hoisted loop. But the
2137    // preheader and header should have the exact same new parent computed
2138    // based on the set of exit blocks from the original loop as the preheader
2139    // is a predecessor of the header and so reached in the reverse walk. And
2140    // because the loops were all in simplified form the preheader of the
2141    // hoisted loop can't be part of some *other* loop.
2142    if (auto *NewParentL = LI.getLoopFor(HoistedL->getLoopPreheader()))
2143      NewParentL->addChildLoop(HoistedL);
2144    else
2145      LI.addTopLevelLoop(HoistedL);
2146  }
2147  SubLoops.erase(SubLoopsSplitI, SubLoops.end());
2148
2149  // Actually delete the loop if nothing remained within it.
2150  if (Blocks.empty()) {
2151    assert(SubLoops.empty() &&
2152           "Failed to remove all subloops from the original loop!");
2153    if (Loop *ParentL = L.getParentLoop())
2154      ParentL->removeChildLoop(llvm::find(*ParentL, &L));
2155    else
2156      LI.removeLoop(llvm::find(LI, &L));
2157    // markLoopAsDeleted for L should be triggered by the caller (it is
2158    // typically done within postUnswitch).
2159    if (SE)
    // NOTE(review): the then-statement of this `if (SE)` (orig. 2160) is
    // missing from the extraction — as written here, the guard would wrongly
    // apply to LI.destroy(&L). Presumably a ScalarEvolution invalidation
    // call; confirm against the upstream source.
2161    LI.destroy(&L);
2162    return false;
2163  }
2164
2165  return true;
2166}
2167
2168/// Helper to visit a dominator subtree, invoking a callable on each node.
2169///
2170/// Returning false at any point will stop walking past that node of the tree.
2171template <typename CallableT>
2172void visitDomSubTree(DominatorTree &DT, BasicBlock *BB, CallableT Callable) {
// NOTE(review): this extraction is missing orig. 2173 (presumably the
// `DomWorklist` vector declaration) and orig. 2176 (presumably the `Visited`
// set declaration inside the NDEBUG guard); confirm upstream.
2174  DomWorklist.push_back(DT[BB]);
2175#ifndef NDEBUG
2177  Visited.insert(DT[BB]);
2178#endif
2179  do {
2180    DomTreeNode *N = DomWorklist.pop_back_val();
2181
2182    // Visit this node.
2183    if (!Callable(N->getBlock()))
2184      continue;
2185
2186    // Accumulate the child nodes.
2187    for (DomTreeNode *ChildN : *N) {
      // The insert is a side effect inside assert(), so it only runs in
      // asserts-enabled builds — matching Visited's NDEBUG-only existence.
2188      assert(Visited.insert(ChildN).second &&
2189             "Cannot visit a node twice when walking a tree!");
2190      DomWorklist.push_back(ChildN);
2191    }
2192  } while (!DomWorklist.empty());
2193}
2194
// Report the outcome of one unswitching step to the loop pass manager
// updater: register newly cloned sibling loops, and either mark the current
// loop as deleted or arrange for it to be revisited — while tagging the loop
// with metadata that suppresses repeating a partial or injected-condition
// unswitch of the same condition.
// NOTE(review): this extraction is missing orig. 2195, the declaration line
// with the function name (presumably `postUnswitch`, which the comment at
// orig. 2157-2158 refers to) and the leading parameters (the loop `L`, the
// updater `U`, and the saved loop name used by markLoopAsDeleted below).
2196                  bool CurrentLoopValid, bool PartiallyInvariant,
2197                  bool InjectedCondition, ArrayRef<Loop *> NewLoops) {
2198  // If we did a non-trivial unswitch, we have added new (cloned) loops.
2199  if (!NewLoops.empty())
2200    U.addSiblingLoops(NewLoops);
2201
2202  // If the current loop remains valid, we should revisit it to catch any
2203  // other unswitch opportunities. Otherwise, we need to mark it as deleted.
2204  if (CurrentLoopValid) {
2205    if (PartiallyInvariant) {
2206      // Mark the new loop as partially unswitched, to avoid unswitching on
2207      // the same condition again.
2208      L.addStringLoopAttribute("llvm.loop.unswitch.partial.disable",
2209                               {"llvm.loop.unswitch.partial"});
2210    } else if (InjectedCondition) {
2211      // Do the same for injection of invariant conditions.
2212      L.addStringLoopAttribute("llvm.loop.unswitch.injection.disable",
2213                               {"llvm.loop.unswitch.injection"});
2214    } else
2215      U.revisitCurrentLoop();
2216  } else
2217    U.markLoopAsDeleted(L, LoopName);
2218}
2219
2221 Loop &L, Instruction &TI, ArrayRef<Value *> Invariants,
2222 IVConditionInfo &PartialIVInfo, DominatorTree &DT, LoopInfo &LI,
2224 LPMUpdater &LoopUpdater, bool InsertFreeze, bool InjectedCondition) {
2225 auto *ParentBB = TI.getParent();
2227 SwitchInst *SI = BI ? nullptr : cast<SwitchInst>(&TI);
2228
2229 // Save the current loop name in a variable so that we can report it even
2230 // after it has been deleted.
2231 std::string LoopName(L.getName());
2232
2233 // We can only unswitch switches, conditional branches with an invariant
2234 // condition, or combining invariant conditions with an instruction or
2235 // partially invariant instructions.
2236 assert((SI || BI) && "Can only unswitch switches and conditional branch!");
2237 bool PartiallyInvariant = !PartialIVInfo.InstToDuplicate.empty();
2238 bool FullUnswitch =
2239 SI || (skipTrivialSelect(BI->getCondition()) == Invariants[0] &&
2240 !PartiallyInvariant);
2241 if (FullUnswitch)
2242 assert(Invariants.size() == 1 &&
2243 "Cannot have other invariants with full unswitching!");
2244 else
2246 "Partial unswitching requires an instruction as the condition!");
2247
2248 if (MSSAU && VerifyMemorySSA)
2249 MSSAU->getMemorySSA()->verifyMemorySSA();
2250
2251 // Constant and BBs tracking the cloned and continuing successor. When we are
2252 // unswitching the entire condition, this can just be trivially chosen to
2253 // unswitch towards `true`. However, when we are unswitching a set of
2254 // invariants combined with `and` or `or` or partially invariant instructions,
2255 // the combining operation determines the best direction to unswitch: we want
2256 // to unswitch the direction that will collapse the branch.
2257 bool Direction = true;
2258 int ClonedSucc = 0;
2259 if (!FullUnswitch) {
2261 (void)Cond;
2263 PartiallyInvariant) &&
2264 "Only `or`, `and`, an `select`, partially invariant instructions "
2265 "can combine invariants being unswitched.");
2266 if (!match(Cond, m_LogicalOr())) {
2267 if (match(Cond, m_LogicalAnd()) ||
2268 (PartiallyInvariant && !PartialIVInfo.KnownValue->isOneValue())) {
2269 Direction = false;
2270 ClonedSucc = 1;
2271 }
2272 }
2273 }
2274
2275 BasicBlock *RetainedSuccBB =
2276 BI ? BI->getSuccessor(1 - ClonedSucc) : SI->getDefaultDest();
2277 SmallSetVector<BasicBlock *, 4> UnswitchedSuccBBs;
2278 if (BI)
2279 UnswitchedSuccBBs.insert(BI->getSuccessor(ClonedSucc));
2280 else
2281 for (auto Case : SI->cases())
2282 if (Case.getCaseSuccessor() != RetainedSuccBB)
2283 UnswitchedSuccBBs.insert(Case.getCaseSuccessor());
2284
2285 assert(!UnswitchedSuccBBs.count(RetainedSuccBB) &&
2286 "Should not unswitch the same successor we are retaining!");
2287
2288 // The branch should be in this exact loop. Any inner loop's invariant branch
2289 // should be handled by unswitching that inner loop. The caller of this
2290 // routine should filter out any candidates that remain (but were skipped for
2291 // whatever reason).
2292 assert(LI.getLoopFor(ParentBB) == &L && "Branch in an inner loop!");
2293
2294 // Compute the parent loop now before we start hacking on things.
2295 Loop *ParentL = L.getParentLoop();
2296 // Get blocks in RPO order for MSSA update, before changing the CFG.
2297 LoopBlocksRPO LBRPO(&L);
2298 if (MSSAU)
2299 LBRPO.perform(&LI);
2300
2301 // Compute the outer-most loop containing one of our exit blocks. This is the
2302 // furthest up our loopnest which can be mutated, which we will use below to
2303 // update things.
2304 Loop *OuterExitL = &L;
2306 L.getUniqueExitBlocks(ExitBlocks);
2307 for (auto *ExitBB : ExitBlocks) {
2308 // ExitBB can be an exit block for several levels in the loop nest. Make
2309 // sure we find the top most.
2310 Loop *NewOuterExitL = getTopMostExitingLoop(ExitBB, LI);
2311 if (!NewOuterExitL) {
2312 // We exited the entire nest with this block, so we're done.
2313 OuterExitL = nullptr;
2314 break;
2315 }
2316 if (NewOuterExitL != OuterExitL && NewOuterExitL->contains(OuterExitL))
2317 OuterExitL = NewOuterExitL;
2318 }
2319
2320 // At this point, we're definitely going to unswitch something so invalidate
2321 // any cached information in ScalarEvolution for the outer most loop
2322 // containing an exit block and all nested loops.
2323 if (SE) {
2324 if (OuterExitL)
2325 SE->forgetLoop(OuterExitL);
2326 else
2327 SE->forgetTopmostLoop(&L);
2329 }
2330
2331 // If the edge from this terminator to a successor dominates that successor,
2332 // store a map from each block in its dominator subtree to it. This lets us
2333 // tell when cloning for a particular successor if a block is dominated by
2334 // some *other* successor with a single data structure. We use this to
2335 // significantly reduce cloning.
2337 for (auto *SuccBB : llvm::concat<BasicBlock *const>(ArrayRef(RetainedSuccBB),
2338 UnswitchedSuccBBs))
2339 if (SuccBB->getUniquePredecessor() ||
2340 llvm::all_of(predecessors(SuccBB), [&](BasicBlock *PredBB) {
2341 return PredBB == ParentBB || DT.dominates(SuccBB, PredBB);
2342 }))
2343 visitDomSubTree(DT, SuccBB, [&](BasicBlock *BB) {
2344 DominatingSucc[BB] = SuccBB;
2345 return true;
2346 });
2347
2348 // Split the preheader, so that we know that there is a safe place to insert
2349 // the conditional branch. We will change the preheader to have a conditional
2350 // branch on LoopCond. The original preheader will become the split point
2351 // between the unswitched versions, and we will have a new preheader for the
2352 // original loop.
2353 BasicBlock *SplitBB = L.getLoopPreheader();
2354 BasicBlock *LoopPH = SplitEdge(SplitBB, L.getHeader(), &DT, &LI, MSSAU);
2355
2356 // Keep track of the dominator tree updates needed.
2358
2359 // Clone the loop for each unswitched successor.
2361 VMaps.reserve(UnswitchedSuccBBs.size());
2363 for (auto *SuccBB : UnswitchedSuccBBs) {
2364 VMaps.emplace_back(new ValueToValueMapTy());
2365 ClonedPHs[SuccBB] = buildClonedLoopBlocks(
2366 L, LoopPH, SplitBB, ExitBlocks, ParentBB, SuccBB, RetainedSuccBB,
2367 DominatingSucc, *VMaps.back(), DTUpdates, AC, DT, LI, MSSAU, SE);
2368 }
2369
2370 // Drop metadata if we may break its semantics by moving this instr into the
2371 // split block.
2372 if (TI.getMetadata(LLVMContext::MD_make_implicit)) {
2374 // Do not spend time trying to understand if we can keep it, just drop it
2375 // to save compile time.
2376 TI.setMetadata(LLVMContext::MD_make_implicit, nullptr);
2377 else {
2378 // It is only legal to preserve make.implicit metadata if we are
2379 // guaranteed no reach implicit null check after following this branch.
2380 ICFLoopSafetyInfo SafetyInfo;
2381 SafetyInfo.computeLoopSafetyInfo(&L);
2382 if (!SafetyInfo.isGuaranteedToExecute(TI, &DT, &L))
2383 TI.setMetadata(LLVMContext::MD_make_implicit, nullptr);
2384 }
2385 }
2386
2387 // The stitching of the branched code back together depends on whether we're
2388 // doing full unswitching or not with the exception that we always want to
2389 // nuke the initial terminator placed in the split block.
2390 SplitBB->getTerminator()->eraseFromParent();
2391 if (FullUnswitch) {
2392 // Keep a clone of the terminator for MSSA updates.
2393 Instruction *NewTI = TI.clone();
2394 NewTI->insertInto(ParentBB, ParentBB->end());
2395
2396 // Splice the terminator from the original loop and rewrite its
2397 // successors.
2398 TI.moveBefore(*SplitBB, SplitBB->end());
2399 TI.dropLocation();
2400
2401 // First wire up the moved terminator to the preheaders.
2402 if (BI) {
2403 BasicBlock *ClonedPH = ClonedPHs.begin()->second;
2404 BI->setSuccessor(ClonedSucc, ClonedPH);
2405 BI->setSuccessor(1 - ClonedSucc, LoopPH);
2407 if (InsertFreeze) {
2408 // We don't give any debug location to the new freeze, because the
2409 // BI (`dyn_cast<CondBrInst>(TI)`) is an in-loop instruction hoisted
2410 // out of the loop.
2411 Cond = new FreezeInst(Cond, Cond->getName() + ".fr", BI->getIterator());
2413 }
2414 BI->setCondition(Cond);
2415 DTUpdates.push_back({DominatorTree::Insert, SplitBB, ClonedPH});
2416 } else {
2417 assert(SI && "Must either be a branch or switch!");
2418
2419 // Walk the cases and directly update their successors.
2420 assert(SI->getDefaultDest() == RetainedSuccBB &&
2421 "Not retaining default successor!");
2422 SI->setDefaultDest(LoopPH);
2423 for (const auto &Case : SI->cases())
2424 if (Case.getCaseSuccessor() == RetainedSuccBB)
2425 Case.setSuccessor(LoopPH);
2426 else
2427 Case.setSuccessor(ClonedPHs.find(Case.getCaseSuccessor())->second);
2428
2429 if (InsertFreeze)
2430 SI->setCondition(new FreezeInst(SI->getCondition(),
2431 SI->getCondition()->getName() + ".fr",
2432 SI->getIterator()));
2433
2434 // We need to use the set to populate domtree updates as even when there
2435 // are multiple cases pointing at the same successor we only want to
2436 // remove and insert one edge in the domtree.
2437 for (BasicBlock *SuccBB : UnswitchedSuccBBs)
2438 DTUpdates.push_back(
2439 {DominatorTree::Insert, SplitBB, ClonedPHs.find(SuccBB)->second});
2440 }
2441
2442 if (MSSAU) {
2443 DT.applyUpdates(DTUpdates);
2444 DTUpdates.clear();
2445
2446 // Remove all but one edge to the retained block and all unswitched
2447 // blocks. This is to avoid having duplicate entries in the cloned Phis,
2448 // when we know we only keep a single edge for each case.
2449 MSSAU->removeDuplicatePhiEdgesBetween(ParentBB, RetainedSuccBB);
2450 for (BasicBlock *SuccBB : UnswitchedSuccBBs)
2451 MSSAU->removeDuplicatePhiEdgesBetween(ParentBB, SuccBB);
2452
2453 for (auto &VMap : VMaps)
2454 MSSAU->updateForClonedLoop(LBRPO, ExitBlocks, *VMap,
2455 /*IgnoreIncomingWithNoClones=*/true);
2456 MSSAU->updateExitBlocksForClonedLoop(ExitBlocks, VMaps, DT);
2457
2458 // Remove all edges to unswitched blocks.
2459 for (BasicBlock *SuccBB : UnswitchedSuccBBs)
2460 MSSAU->removeEdge(ParentBB, SuccBB);
2461 }
2462
2463 // Now unhook the successor relationship as we'll be replacing
2464 // the terminator with a direct branch. This is much simpler for branches
2465 // than switches so we handle those first.
2466 if (BI) {
2467 // Remove the parent as a predecessor of the unswitched successor.
2468 assert(UnswitchedSuccBBs.size() == 1 &&
2469 "Only one possible unswitched block for a branch!");
2470 BasicBlock *UnswitchedSuccBB = *UnswitchedSuccBBs.begin();
2471 UnswitchedSuccBB->removePredecessor(ParentBB,
2472 /*KeepOneInputPHIs*/ true);
2473 DTUpdates.push_back({DominatorTree::Delete, ParentBB, UnswitchedSuccBB});
2474 } else {
2475 // Note that we actually want to remove the parent block as a predecessor
2476 // of *every* case successor. The case successor is either unswitched,
2477 // completely eliminating an edge from the parent to that successor, or it
2478 // is a duplicate edge to the retained successor as the retained successor
2479 // is always the default successor and as we'll replace this with a direct
2480 // branch we no longer need the duplicate entries in the PHI nodes.
2481 SwitchInst *NewSI = cast<SwitchInst>(NewTI);
2482 assert(NewSI->getDefaultDest() == RetainedSuccBB &&
2483 "Not retaining default successor!");
2484 for (const auto &Case : NewSI->cases())
2485 Case.getCaseSuccessor()->removePredecessor(
2486 ParentBB,
2487 /*KeepOneInputPHIs*/ true);
2488
2489 // We need to use the set to populate domtree updates as even when there
2490 // are multiple cases pointing at the same successor we only want to
2491 // remove and insert one edge in the domtree.
2492 for (BasicBlock *SuccBB : UnswitchedSuccBBs)
2493 DTUpdates.push_back({DominatorTree::Delete, ParentBB, SuccBB});
2494 }
2495
2496 // Create a new unconditional branch to the continuing block (as opposed to
2497 // the one cloned).
2498 Instruction *NewBI = UncondBrInst::Create(RetainedSuccBB, ParentBB);
2499 NewBI->setDebugLoc(NewTI->getDebugLoc());
2500
2501 // After MSSAU update, remove the cloned terminator instruction NewTI.
2502 NewTI->eraseFromParent();
2503 } else {
2504 assert(BI && "Only branches have partial unswitching.");
2505 assert(UnswitchedSuccBBs.size() == 1 &&
2506 "Only one possible unswitched block for a branch!");
2507 BasicBlock *ClonedPH = ClonedPHs.begin()->second;
2508 // When doing a partial unswitch, we have to do a bit more work to build up
2509 // the branch in the split block.
2510 if (PartiallyInvariant)
2512 *SplitBB, Invariants, Direction, *ClonedPH, *LoopPH, L, MSSAU, *BI);
2513 else {
2515 *SplitBB, Invariants, Direction, *ClonedPH, *LoopPH,
2516 FreezeLoopUnswitchCond, BI, &AC, DT, *BI);
2517 }
2518 DTUpdates.push_back({DominatorTree::Insert, SplitBB, ClonedPH});
2519
2520 if (MSSAU) {
2521 DT.applyUpdates(DTUpdates);
2522 DTUpdates.clear();
2523
2524 // Perform MSSA cloning updates.
2525 for (auto &VMap : VMaps)
2526 MSSAU->updateForClonedLoop(LBRPO, ExitBlocks, *VMap,
2527 /*IgnoreIncomingWithNoClones=*/true);
2528 MSSAU->updateExitBlocksForClonedLoop(ExitBlocks, VMaps, DT);
2529 }
2530 }
2531
2532 // Apply the updates accumulated above to get an up-to-date dominator tree.
2533 DT.applyUpdates(DTUpdates);
2534
2535 // Now that we have an accurate dominator tree, first delete the dead cloned
2536 // blocks so that we can accurately build any cloned loops. It is important to
2537 // not delete the blocks from the original loop yet because we still want to
2538 // reference the original loop to understand the cloned loop's structure.
2539 deleteDeadClonedBlocks(L, ExitBlocks, VMaps, DT, MSSAU);
2540
2541 // Build the cloned loop structure itself. This may be substantially
2542 // different from the original structure due to the simplified CFG. This also
2543 // handles inserting all the cloned blocks into the correct loops.
2544 SmallVector<Loop *, 4> NonChildClonedLoops;
2545 for (std::unique_ptr<ValueToValueMapTy> &VMap : VMaps)
2546 buildClonedLoops(L, ExitBlocks, *VMap, LI, NonChildClonedLoops);
2547
2548 // Now that our cloned loops have been built, we can update the original loop.
2549 // First we delete the dead blocks from it and then we rebuild the loop
2550 // structure taking these deletions into account.
2551 deleteDeadBlocksFromLoop(L, ExitBlocks, DT, LI, MSSAU, SE, LoopUpdater);
2552
2553 if (MSSAU && VerifyMemorySSA)
2554 MSSAU->getMemorySSA()->verifyMemorySSA();
2555
2556 SmallVector<Loop *, 4> HoistedLoops;
2557 bool IsStillLoop =
2558 rebuildLoopAfterUnswitch(L, ExitBlocks, LI, HoistedLoops, SE);
2559
2560 if (MSSAU && VerifyMemorySSA)
2561 MSSAU->getMemorySSA()->verifyMemorySSA();
2562
2563#ifdef EXPENSIVE_CHECKS
2564 // This transformation has a high risk of corrupting the dominator tree, and
2565 // the below steps to rebuild loop structures will result in hard to debug
2566 // errors in that case so verify that the dominator tree is sane first.
2567 // FIXME: Remove this when the bugs stop showing up and rely on existing
2568 // verification steps.
2569 assert(DT.verify(DominatorTree::VerificationLevel::Fast));
2570#endif
2571
2572 if (BI && !PartiallyInvariant) {
2573 // If we unswitched a branch which collapses the condition to a known
2574 // constant we want to replace all the uses of the invariants within both
2575 // the original and cloned blocks. We do this here so that we can use the
2576 // now updated dominator tree to identify which side the users are on.
2577 assert(UnswitchedSuccBBs.size() == 1 &&
2578 "Only one possible unswitched block for a branch!");
2579 BasicBlock *ClonedPH = ClonedPHs.begin()->second;
2580
2581 // When considering multiple partially-unswitched invariants
2582 // we cant just go replace them with constants in both branches.
2583 //
2584 // For 'AND' we infer that true branch ("continue") means true
2585 // for each invariant operand.
2586 // For 'OR' we can infer that false branch ("continue") means false
2587 // for each invariant operand.
2588 // So it happens that for multiple-partial case we dont replace
2589 // in the unswitched branch.
2590 bool ReplaceUnswitched =
2591 FullUnswitch || (Invariants.size() == 1) || PartiallyInvariant;
2592
2593 ConstantInt *UnswitchedReplacement =
2594 Direction ? ConstantInt::getTrue(BI->getContext())
2595 : ConstantInt::getFalse(BI->getContext());
2596 ConstantInt *ContinueReplacement =
2597 Direction ? ConstantInt::getFalse(BI->getContext())
2598 : ConstantInt::getTrue(BI->getContext());
2599 for (Value *Invariant : Invariants) {
2600 assert(!isa<Constant>(Invariant) &&
2601 "Should not be replacing constant values!");
2602 // Use make_early_inc_range here as set invalidates the iterator.
2603 for (Use &U : llvm::make_early_inc_range(Invariant->uses())) {
2604 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
2605 if (!UserI)
2606 continue;
2607
2608 // Replace it with the 'continue' side if in the main loop body, and the
2609 // unswitched if in the cloned blocks.
2610 if (DT.dominates(LoopPH, UserI->getParent()))
2611 U.set(ContinueReplacement);
2612 else if (ReplaceUnswitched &&
2613 DT.dominates(ClonedPH, UserI->getParent()))
2614 U.set(UnswitchedReplacement);
2615 }
2616 }
2617 }
2618
2619 // We can change which blocks are exit blocks of all the cloned sibling
2620 // loops, the current loop, and any parent loops which shared exit blocks
2621 // with the current loop. As a consequence, we need to re-form LCSSA for
2622 // them. But we shouldn't need to re-form LCSSA for any child loops.
2623 // FIXME: This could be made more efficient by tracking which exit blocks are
2624 // new, and focusing on them, but that isn't likely to be necessary.
2625 //
2626 // In order to reasonably rebuild LCSSA we need to walk inside-out across the
2627 // loop nest and update every loop that could have had its exits changed. We
2628 // also need to cover any intervening loops. We add all of these loops to
2629 // a list and sort them by loop depth to achieve this without updating
2630 // unnecessary loops.
2631 auto UpdateLoop = [&](Loop &UpdateL) {
2632#ifndef NDEBUG
2633 UpdateL.verifyLoop();
2634 for (Loop *ChildL : UpdateL) {
2635 ChildL->verifyLoop();
2636 assert(ChildL->isRecursivelyLCSSAForm(DT, LI) &&
2637 "Perturbed a child loop's LCSSA form!");
2638 }
2639#endif
2640 // First build LCSSA for this loop so that we can preserve it when
2641 // forming dedicated exits. We don't want to perturb some other loop's
2642 // LCSSA while doing that CFG edit.
2643 formLCSSA(UpdateL, DT, &LI, SE);
2644
2645 // For loops reached by this loop's original exit blocks we may
2646 // introduced new, non-dedicated exits. At least try to re-form dedicated
2647 // exits for these loops. This may fail if they couldn't have dedicated
2648 // exits to start with.
2649 formDedicatedExitBlocks(&UpdateL, &DT, &LI, MSSAU, /*PreserveLCSSA*/ true);
2650 };
2651
2652 // For non-child cloned loops and hoisted loops, we just need to update LCSSA
2653 // and we can do it in any order as they don't nest relative to each other.
2654 //
2655 // Also check if any of the loops we have updated have become top-level loops
2656 // as that will necessitate widening the outer loop scope.
2657 for (Loop *UpdatedL :
2658 llvm::concat<Loop *>(NonChildClonedLoops, HoistedLoops)) {
2659 UpdateLoop(*UpdatedL);
2660 if (UpdatedL->isOutermost())
2661 OuterExitL = nullptr;
2662 }
2663 if (IsStillLoop) {
2664 UpdateLoop(L);
2665 if (L.isOutermost())
2666 OuterExitL = nullptr;
2667 }
2668
2669 // If the original loop had exit blocks, walk up through the outer most loop
2670 // of those exit blocks to update LCSSA and form updated dedicated exits.
2671 if (OuterExitL != &L)
2672 for (Loop *OuterL = ParentL; OuterL != OuterExitL;
2673 OuterL = OuterL->getParentLoop())
2674 UpdateLoop(*OuterL);
2675
2676#ifdef EXPENSIVE_CHECKS
2677 // Verify the entire loop structure to catch any incorrect updates before we
2678 // progress in the pass pipeline.
2679 LI.verify(DT);
2680#endif
2681
2682 // Now that we've unswitched something, make callbacks to report the changes.
2683 // For that we need to merge together the updated loops and the cloned loops
2684 // and check whether the original loop survived.
2685 SmallVector<Loop *, 4> SibLoops;
2686 for (Loop *UpdatedL : llvm::concat<Loop *>(NonChildClonedLoops, HoistedLoops))
2687 if (UpdatedL->getParentLoop() == ParentL)
2688 SibLoops.push_back(UpdatedL);
2689 postUnswitch(L, LoopUpdater, LoopName, IsStillLoop, PartiallyInvariant,
2690 InjectedCondition, SibLoops);
2691
2692 if (MSSAU && VerifyMemorySSA)
2693 MSSAU->getMemorySSA()->verifyMemorySSA();
2694
2695 if (BI)
2696 ++NumBranches;
2697 else
2698 ++NumSwitches;
2699}
2700
2701/// Recursively compute the cost of a dominator subtree based on the per-block
2702/// cost map provided.
2703///
2704/// The recursive computation is memozied into the provided DT-indexed cost map
2705/// to allow querying it for most nodes in the domtree without it becoming
2706/// quadratic.
2708 DomTreeNode &N,
2711 // Don't accumulate cost (or recurse through) blocks not in our block cost
2712 // map and thus not part of the duplication cost being considered.
2713 auto BBCostIt = BBCostMap.find(N.getBlock());
2714 if (BBCostIt == BBCostMap.end())
2715 return 0;
2716
2717 // Lookup this node to see if we already computed its cost.
2718 auto DTCostIt = DTCostMap.find(&N);
2719 if (DTCostIt != DTCostMap.end())
2720 return DTCostIt->second;
2721
2722 // If not, we have to compute it. We can't use insert above and update
2723 // because computing the cost may insert more things into the map.
2724 InstructionCost Cost = std::accumulate(
2725 N.begin(), N.end(), BBCostIt->second,
2726 [&](InstructionCost Sum, DomTreeNode *ChildN) -> InstructionCost {
2727 return Sum + computeDomSubtreeCost(*ChildN, BBCostMap, DTCostMap);
2728 });
2729 bool Inserted = DTCostMap.insert({&N, Cost}).second;
2730 (void)Inserted;
2731 assert(Inserted && "Should not insert a node while visiting children!");
2732 return Cost;
2733}
2734
2735/// Turns a select instruction into implicit control flow branch,
2736/// making the following replacement:
2737///
2738/// head:
2739/// --code before select--
2740/// select %cond, %trueval, %falseval
2741/// --code after select--
2742///
2743/// into
2744///
2745/// head:
2746/// --code before select--
2747/// br i1 %cond, label %then, label %tail
2748///
2749/// then:
2750/// br %tail
2751///
2752/// tail:
2753/// phi [ %trueval, %then ], [ %falseval, %head]
2754/// unreachable
2755///
2756/// It also makes all relevant DT and LI updates, so that all structures are in
2757/// valid state after this transform.
2759 LoopInfo &LI, MemorySSAUpdater *MSSAU,
2760 AssumptionCache *AC) {
2761 LLVM_DEBUG(dbgs() << "Turning " << *SI << " into a branch.\n");
2762 BasicBlock *HeadBB = SI->getParent();
2763
2764 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2765 SplitBlockAndInsertIfThen(SI->getCondition(), SI, false,
2766 SI->getMetadata(LLVMContext::MD_prof), &DTU, &LI);
2767 auto *CondBr = cast<CondBrInst>(HeadBB->getTerminator());
2768 BasicBlock *ThenBB = CondBr->getSuccessor(0),
2769 *TailBB = CondBr->getSuccessor(1);
2770 if (MSSAU)
2771 MSSAU->moveAllAfterSpliceBlocks(HeadBB, TailBB, SI);
2772
2773 PHINode *Phi =
2774 PHINode::Create(SI->getType(), 2, "unswitched.select", SI->getIterator());
2775 Phi->addIncoming(SI->getTrueValue(), ThenBB);
2776 Phi->addIncoming(SI->getFalseValue(), HeadBB);
2777 Phi->setDebugLoc(SI->getDebugLoc());
2778 SI->replaceAllUsesWith(Phi);
2779 SI->eraseFromParent();
2780
2781 if (MSSAU && VerifyMemorySSA)
2782 MSSAU->getMemorySSA()->verifyMemorySSA();
2783
2784 ++NumSelects;
2785 return CondBr;
2786}
2787
2788/// Turns a llvm.experimental.guard intrinsic into implicit control flow branch,
2789/// making the following replacement:
2790///
2791/// --code before guard--
2792/// call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
2793/// --code after guard--
2794///
2795/// into
2796///
2797/// --code before guard--
2798/// br i1 %cond, label %guarded, label %deopt
2799///
2800/// guarded:
2801/// --code after guard--
2802///
2803/// deopt:
2804/// call void (i1, ...) @llvm.experimental.guard(i1 false) [ "deopt"() ]
2805/// unreachable
2806///
2807/// It also makes all relevant DT and LI updates, so that all structures are in
2808/// valid state after this transform.
2810 DominatorTree &DT, LoopInfo &LI,
2811 MemorySSAUpdater *MSSAU) {
2812 LLVM_DEBUG(dbgs() << "Turning " << *GI << " into a branch.\n");
2813 BasicBlock *CheckBB = GI->getParent();
2814
2815 if (MSSAU && VerifyMemorySSA)
2816 MSSAU->getMemorySSA()->verifyMemorySSA();
2817
2818 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2819 // llvm.experimental.guard doesn't have branch weights. We can assume,
2820 // however, that the deopt path is unlikely.
2821 Instruction *DeoptBlockTerm = SplitBlockAndInsertIfThen(
2822 GI->getArgOperand(0), GI, true,
2825 : nullptr,
2826 &DTU, &LI);
2827 CondBrInst *CheckBI = cast<CondBrInst>(CheckBB->getTerminator());
2828 // SplitBlockAndInsertIfThen inserts control flow that branches to
2829 // DeoptBlockTerm if the condition is true. We want the opposite.
2830 CheckBI->swapSuccessors();
2831
2832 BasicBlock *GuardedBlock = CheckBI->getSuccessor(0);
2833 GuardedBlock->setName("guarded");
2834 CheckBI->getSuccessor(1)->setName("deopt");
2835 BasicBlock *DeoptBlock = CheckBI->getSuccessor(1);
2836
2837 if (MSSAU)
2838 MSSAU->moveAllAfterSpliceBlocks(CheckBB, GuardedBlock, GI);
2839
2840 GI->moveBefore(DeoptBlockTerm->getIterator());
2842
2843 if (MSSAU) {
2845 MSSAU->moveToPlace(MD, DeoptBlock, MemorySSA::BeforeTerminator);
2846 if (VerifyMemorySSA)
2847 MSSAU->getMemorySSA()->verifyMemorySSA();
2848 }
2849
2850 if (VerifyLoopInfo)
2851 LI.verify(DT);
2852 ++NumGuards;
2853 return CheckBI;
2854}
2855
2856/// Cost multiplier is a way to limit potentially exponential behavior
2857/// of loop-unswitch. Cost is multiplied in proportion of 2^number of unswitch
2858/// candidates available. Also consider the number of "sibling" loops with
2859/// the idea of accounting for previous unswitches that already happened on this
2860/// cluster of loops. There was an attempt to keep this formula simple,
2861/// just enough to limit the worst case behavior. Even if it is not that simple
2862/// now it is still not an attempt to provide a detailed heuristic size
2863/// prediction.
2864///
2865/// TODO: Make a proper accounting of "explosion" effect for all kinds of
2866/// unswitch candidates, making adequate predictions instead of wild guesses.
2867/// That requires knowing not just the number of "remaining" candidates but
2868/// also costs of unswitching for each of these candidates.
2870 const Instruction &TI, const Loop &L, const LoopInfo &LI,
2871 const DominatorTree &DT,
2872 ArrayRef<NonTrivialUnswitchCandidate> UnswitchCandidates) {
2873
2874 // Guards and other exiting conditions do not contribute to exponential
2875 // explosion as soon as they dominate the latch (otherwise there might be
2876 // another path to the latch remaining that does not allow to eliminate the
2877 // loop copy on unswitch).
2878 const BasicBlock *Latch = L.getLoopLatch();
2879 const BasicBlock *CondBlock = TI.getParent();
2880 if (DT.dominates(CondBlock, Latch) &&
2881 (isGuard(&TI) ||
2882 (TI.isTerminator() &&
2883 llvm::count_if(successors(&TI), [&L](const BasicBlock *SuccBB) {
2884 return L.contains(SuccBB);
2885 }) <= 1))) {
2886 NumCostMultiplierSkipped++;
2887 return 1;
2888 }
2889
2890 // Each invariant non-trivial condition, after being unswitched, is supposed
2891 // to have its own specialized sibling loop (the invariant condition has been
2892 // hoisted out of the child loop into a newly-cloned loop). When unswitching
2893 // conditions in nested loops, the basic block size of the outer loop should
2894 // not be altered. If such a size significantly increases across unswitching
2895 // invocations, something may be wrong; so adjust the final cost taking this
2896 // into account.
2897 auto *ParentL = L.getParentLoop();
2898 int ParentLoopSizeMultiplier = 1;
2899 if (ParentL)
2900 ParentLoopSizeMultiplier =
2901 std::max<int>(ParentL->getNumBlocks() / UnswitchParentBlocksDiv, 1);
2902
2903 int SiblingsCount =
2904 (ParentL ? ParentL->getSubLoopsVector().size() : llvm::size(LI));
2905 // Count amount of clones that all the candidates might cause during
2906 // unswitching. Branch/guard/select counts as 1, switch counts as log2 of its
2907 // cases.
2908 int UnswitchedClones = 0;
2909 for (const auto &Candidate : UnswitchCandidates) {
2910 const Instruction *CI = Candidate.TI;
2911 const BasicBlock *CondBlock = CI->getParent();
2912 bool SkipExitingSuccessors = DT.dominates(CondBlock, Latch);
2913 if (isa<SelectInst>(CI)) {
2914 UnswitchedClones++;
2915 continue;
2916 }
2917 if (isGuard(CI)) {
2918 if (!SkipExitingSuccessors)
2919 UnswitchedClones++;
2920 continue;
2921 }
2922 int NonExitingSuccessors =
2923 llvm::count_if(successors(CondBlock),
2924 [SkipExitingSuccessors, &L](const BasicBlock *SuccBB) {
2925 return !SkipExitingSuccessors || L.contains(SuccBB);
2926 });
2927 UnswitchedClones += Log2_32(NonExitingSuccessors);
2928 }
2929
2930 // Ignore up to the "unscaled candidates" number of unswitch candidates
2931 // when calculating the power-of-two scaling of the cost. The main idea
2932 // with this control is to allow a small number of unswitches to happen
2933 // and rely more on siblings multiplier (see below) when the number
2934 // of candidates is small.
2935 unsigned ClonesPower =
2936 std::max(UnswitchedClones - (int)UnswitchNumInitialUnscaledCandidates, 0);
2937
2938 // Allowing top-level loops to spread a bit more than nested ones.
2939 int SiblingsMultiplier =
2940 std::max((ParentL ? SiblingsCount
2941 : SiblingsCount / (int)UnswitchSiblingsToplevelDiv),
2942 1);
2943 // Compute the cost multiplier in a way that won't overflow by saturating
2944 // at an upper bound.
2945 int CostMultiplier;
2946 if (ClonesPower > Log2_32(UnswitchThreshold) ||
2947 SiblingsMultiplier > UnswitchThreshold ||
2948 ParentLoopSizeMultiplier > UnswitchThreshold)
2949 CostMultiplier = UnswitchThreshold;
2950 else
2951 CostMultiplier = std::min(SiblingsMultiplier * (1 << ClonesPower),
2952 (int)UnswitchThreshold);
2953
2954 LLVM_DEBUG(dbgs() << " Computed multiplier " << CostMultiplier
2955 << " (siblings " << SiblingsMultiplier << " * parent size "
2956 << ParentLoopSizeMultiplier << " * clones "
2957 << (1 << ClonesPower) << ")"
2958 << " for unswitch candidate: " << TI << "\n");
2959 return CostMultiplier;
2960}
2961
2964 IVConditionInfo &PartialIVInfo, Instruction *&PartialIVCondBranch,
2965 const Loop &L, const LoopInfo &LI, AAResults &AA,
2966 const MemorySSAUpdater *MSSAU) {
2967 assert(UnswitchCandidates.empty() && "Should be!");
2968
2969 auto AddUnswitchCandidatesForInst = [&](Instruction *I, Value *Cond) {
2971 if (isa<Constant>(Cond))
2972 return;
2973 if (L.isLoopInvariant(Cond)) {
2974 UnswitchCandidates.push_back({I, {Cond}});
2975 return;
2976 }
2978 TinyPtrVector<Value *> Invariants =
2980 L, *static_cast<Instruction *>(Cond), LI);
2981 if (!Invariants.empty())
2982 UnswitchCandidates.push_back({I, std::move(Invariants)});
2983 }
2984 };
2985
2986 // Whether or not we should also collect guards in the loop.
2987 bool CollectGuards = false;
2988 if (UnswitchGuards) {
2989 auto *GuardDecl = Intrinsic::getDeclarationIfExists(
2990 L.getHeader()->getParent()->getParent(), Intrinsic::experimental_guard);
2991 if (GuardDecl && !GuardDecl->use_empty())
2992 CollectGuards = true;
2993 }
2994
2995 for (auto *BB : L.blocks()) {
2996 if (LI.getLoopFor(BB) != &L)
2997 continue;
2998
2999 for (auto &I : *BB) {
3000 if (auto *SI = dyn_cast<SelectInst>(&I)) {
3001 auto *Cond = SI->getCondition();
3002 // Do not unswitch vector selects and logical and/or selects
3003 if (Cond->getType()->isIntegerTy(1) && !SI->getType()->isIntegerTy(1))
3004 AddUnswitchCandidatesForInst(SI, Cond);
3005 } else if (CollectGuards && isGuard(&I)) {
3006 auto *Cond =
3007 skipTrivialSelect(cast<IntrinsicInst>(&I)->getArgOperand(0));
3008 // TODO: Support AND, OR conditions and partial unswitching.
3009 if (!isa<Constant>(Cond) && L.isLoopInvariant(Cond))
3010 UnswitchCandidates.push_back({&I, {Cond}});
3011 }
3012 }
3013
3014 if (auto *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
3015 // We can only consider fully loop-invariant switch conditions as we need
3016 // to completely eliminate the switch after unswitching.
3017 if (!isa<Constant>(SI->getCondition()) &&
3018 L.isLoopInvariant(SI->getCondition()) && !BB->getUniqueSuccessor())
3019 UnswitchCandidates.push_back({SI, {SI->getCondition()}});
3020 continue;
3021 }
3022
3023 auto *BI = dyn_cast<CondBrInst>(BB->getTerminator());
3024 if (!BI || BI->getSuccessor(0) == BI->getSuccessor(1))
3025 continue;
3026
3027 AddUnswitchCandidatesForInst(BI, BI->getCondition());
3028 }
3029
3030 if (MSSAU && !findOptionMDForLoop(&L, "llvm.loop.unswitch.partial.disable") &&
3031 !any_of(UnswitchCandidates, [&L](auto &TerminatorAndInvariants) {
3032 return TerminatorAndInvariants.TI == L.getHeader()->getTerminator();
3033 })) {
3034 MemorySSA *MSSA = MSSAU->getMemorySSA();
3035 if (auto Info = hasPartialIVCondition(L, MSSAThreshold, *MSSA, AA)) {
3036 LLVM_DEBUG(
3037 dbgs() << "simple-loop-unswitch: Found partially invariant condition "
3038 << *Info->InstToDuplicate[0] << "\n");
3039 PartialIVInfo = *Info;
3040 PartialIVCondBranch = L.getHeader()->getTerminator();
3041 TinyPtrVector<Value *> ValsToDuplicate;
3042 llvm::append_range(ValsToDuplicate, Info->InstToDuplicate);
3043 UnswitchCandidates.push_back(
3044 {L.getHeader()->getTerminator(), std::move(ValsToDuplicate)});
3045 }
3046 }
3047 return !UnswitchCandidates.empty();
3048}
3049
3050/// Tries to canonicalize condition described by:
3051///
3052/// br (LHS pred RHS), label IfTrue, label IfFalse
3053///
3054/// into its equivalent where `Pred` is something that we support for injected
3055/// invariants (so far it is limited to ult), LHS in canonicalized form is
3056/// non-invariant and RHS is an invariant.
3058 Value *&LHS, Value *&RHS,
3059 BasicBlock *&IfTrue,
3060 BasicBlock *&IfFalse,
3061 const Loop &L) {
3062 if (!L.contains(IfTrue)) {
3063 Pred = ICmpInst::getInversePredicate(Pred);
3064 std::swap(IfTrue, IfFalse);
3065 }
3066
3067 // Move loop-invariant argument to RHS position.
3068 if (L.isLoopInvariant(LHS)) {
3069 Pred = ICmpInst::getSwappedPredicate(Pred);
3070 std::swap(LHS, RHS);
3071 }
3072
3073 if (Pred == ICmpInst::ICMP_SGE && match(RHS, m_Zero())) {
3074 // Turn "x >=s 0" into "x <u UMIN_INT"
3075 Pred = ICmpInst::ICMP_ULT;
3076 RHS = ConstantInt::get(
3077 RHS->getContext(),
3078 APInt::getSignedMinValue(RHS->getType()->getIntegerBitWidth()));
3079 }
3080}
3081
3082/// Returns true, if predicate described by ( \p Pred, \p LHS, \p RHS )
3083/// succeeding into blocks ( \p IfTrue, \p IfFalse) can be optimized by
3084/// injecting a loop-invariant condition.
3086 const ICmpInst::Predicate Pred, const Value *LHS, const Value *RHS,
3087 const BasicBlock *IfTrue, const BasicBlock *IfFalse, const Loop &L) {
3088 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
3089 return false;
3090 // TODO: Support other predicates.
3091 if (Pred != ICmpInst::ICMP_ULT)
3092 return false;
3093 // TODO: Support non-loop-exiting branches?
3094 if (!L.contains(IfTrue) || L.contains(IfFalse))
3095 return false;
3096 // FIXME: For some reason this causes problems with MSSA updates, need to
3097 // investigate why. So far, just don't unswitch latch.
3098 if (L.getHeader() == IfTrue)
3099 return false;
3100 return true;
3101}
3102
3103/// Returns true, if metadata on \p BI allows us to optimize branching into \p
3104/// TakenSucc via injection of invariant conditions. The branch should be not
3105/// enough and not previously unswitched, the information about this comes from
3106/// the metadata.
3108 const BasicBlock *TakenSucc) {
3109 SmallVector<uint32_t> Weights;
3110 if (!extractBranchWeights(*BI, Weights))
3111 return false;
3113 BranchProbability LikelyTaken(T - 1, T);
3114
3115 assert(Weights.size() == 2 && "Unexpected profile data!");
3116 size_t Idx = BI->getSuccessor(0) == TakenSucc ? 0 : 1;
3117 auto Num = Weights[Idx];
3118 auto Denom = Weights[0] + Weights[1];
3119 // Degenerate or overflowed metadata.
3120 if (Denom == 0 || Num > Denom)
3121 return false;
3122 BranchProbability ActualTaken(Num, Denom);
3123 if (LikelyTaken > ActualTaken)
3124 return false;
3125 return true;
3126}
3127
3128/// Materialize pending invariant condition of the given candidate into IR. The
3129/// injected loop-invariant condition implies the original loop-variant branch
3130/// condition, so the materialization turns
3131///
3132/// loop_block:
3133/// ...
3134/// br i1 %variant_cond, label InLoopSucc, label OutOfLoopSucc
3135///
3136/// into
3137///
3138/// preheader:
3139/// %invariant_cond = LHS pred RHS
3140/// ...
3141/// loop_block:
3142/// br i1 %invariant_cond, label InLoopSucc, label OriginalCheck
3143/// OriginalCheck:
3144/// br i1 %variant_cond, label InLoopSucc, label OutOfLoopSucc
3145/// ...
3146static NonTrivialUnswitchCandidate
3147injectPendingInvariantConditions(NonTrivialUnswitchCandidate Candidate, Loop &L,
3148 DominatorTree &DT, LoopInfo &LI,
3149 AssumptionCache &AC, MemorySSAUpdater *MSSAU) {
3150 assert(Candidate.hasPendingInjection() && "Nothing to inject!");
3151 BasicBlock *Preheader = L.getLoopPreheader();
3152 assert(Preheader && "Loop is not in simplified form?");
3153 assert(LI.getLoopFor(Candidate.TI->getParent()) == &L &&
3154 "Unswitching branch of inner loop!");
3155
3156 auto Pred = Candidate.PendingInjection->Pred;
3157 auto *LHS = Candidate.PendingInjection->LHS;
3158 auto *RHS = Candidate.PendingInjection->RHS;
3159 auto *InLoopSucc = Candidate.PendingInjection->InLoopSucc;
3160 auto *TI = cast<CondBrInst>(Candidate.TI);
3161 auto *BB = Candidate.TI->getParent();
3162 auto *OutOfLoopSucc = InLoopSucc == TI->getSuccessor(0) ? TI->getSuccessor(1)
3163 : TI->getSuccessor(0);
3164 // FIXME: Remove this once limitation on successors is lifted.
3165 assert(L.contains(InLoopSucc) && "Not supported yet!");
3166 assert(!L.contains(OutOfLoopSucc) && "Not supported yet!");
3167 auto &Ctx = BB->getContext();
3168
3169 IRBuilder<> Builder(Preheader->getTerminator());
3170 assert(ICmpInst::isUnsigned(Pred) && "Not supported yet!");
3171 if (LHS->getType() != RHS->getType()) {
3172 if (LHS->getType()->getIntegerBitWidth() <
3173 RHS->getType()->getIntegerBitWidth())
3174 LHS = Builder.CreateZExt(LHS, RHS->getType(), LHS->getName() + ".wide");
3175 else
3176 RHS = Builder.CreateZExt(RHS, LHS->getType(), RHS->getName() + ".wide");
3177 }
3178 // Do not use builder here: CreateICmp may simplify this into a constant and
3179 // unswitching will break. Better optimize it away later.
3180 auto *InjectedCond =
3181 ICmpInst::Create(Instruction::ICmp, Pred, LHS, RHS, "injected.cond",
3182 Preheader->getTerminator()->getIterator());
3183
3184 BasicBlock *CheckBlock = BasicBlock::Create(Ctx, BB->getName() + ".check",
3185 BB->getParent(), InLoopSucc);
3186 Builder.SetInsertPoint(TI);
3187 auto *InvariantBr =
3188 Builder.CreateCondBr(InjectedCond, InLoopSucc, CheckBlock);
3189 // We don't know anything about the relation between the limits.
3191
3192 Builder.SetInsertPoint(CheckBlock);
3193 Builder.CreateCondBr(
3194 TI->getCondition(), TI->getSuccessor(0), TI->getSuccessor(1),
3195 !ProfcheckDisableMetadataFixes ? TI->getMetadata(LLVMContext::MD_prof)
3196 : nullptr);
3197 TI->eraseFromParent();
3198
3199 // Fixup phis.
3200 for (auto &I : *InLoopSucc) {
3201 auto *PN = dyn_cast<PHINode>(&I);
3202 if (!PN)
3203 break;
3204 auto *Inc = PN->getIncomingValueForBlock(BB);
3205 PN->addIncoming(Inc, CheckBlock);
3206 }
3207 OutOfLoopSucc->replacePhiUsesWith(BB, CheckBlock);
3208
3210 { DominatorTree::Insert, BB, CheckBlock },
3211 { DominatorTree::Insert, CheckBlock, InLoopSucc },
3212 { DominatorTree::Insert, CheckBlock, OutOfLoopSucc },
3213 { DominatorTree::Delete, BB, OutOfLoopSucc }
3214 };
3215
3216 DT.applyUpdates(DTUpdates);
3217 if (MSSAU)
3218 MSSAU->applyUpdates(DTUpdates, DT);
3219 L.addBasicBlockToLoop(CheckBlock, LI);
3220
3221#ifndef NDEBUG
3222 DT.verify();
3223 LI.verify(DT);
3224 if (MSSAU && VerifyMemorySSA)
3225 MSSAU->getMemorySSA()->verifyMemorySSA();
3226#endif
3227
3228 // TODO: In fact, cost of unswitching a new invariant candidate is *slightly*
3229 // higher because we have just inserted a new block. Need to think how to
3230 // adjust the cost of injected candidates when it was first computed.
3231 LLVM_DEBUG(dbgs() << "Injected a new loop-invariant branch " << *InvariantBr
3232 << " and considering it for unswitching.");
3233 ++NumInvariantConditionsInjected;
3234 return NonTrivialUnswitchCandidate(InvariantBr, { InjectedCond },
3235 Candidate.Cost);
3236}
3237
3238 /// Given chain of loop branch conditions looking like:
3239 /// br (Variant < Invariant1)
3240 /// br (Variant < Invariant2)
3241 /// br (Variant < Invariant3)
3242 /// ...
3243 /// collect set of invariant conditions on which we want to unswitch, which
3244 /// look like:
3245 /// Invariant1 <= Invariant2
3246 /// Invariant2 <= Invariant3
3247 /// ...
3248 /// Though they might not immediately exist in the IR, we can still inject them.
// NOTE(review): the rendering dropped lines 3249/3251/3254-3255/3258 here,
// including the signature of insertCandidatesWithPendingInjections and the
// computation of the non-strict predicate; remaining lines kept verbatim.
3250 SmallVectorImpl<NonTrivialUnswitchCandidate> &UnswitchCandidates, Loop &L,
3252 const DominatorTree &DT) {
3253
// A chain needs at least two comparisons against the same loop-variant value,
// otherwise there is no pair of invariant bounds to relate.
3256 if (Compares.size() < 2)
3257 return false;
// For each adjacent pair (Prev, Next) in the chain, queue one pending
// injected invariant relating Next's bound to Prev's bound, anchored at
// Prev's terminator. The candidate cost is deliberately left unset
// (std::nullopt); it is computed later when candidates are ranked.
3259 for (auto Prev = Compares.begin(), Next = Compares.begin() + 1;
3260 Next != Compares.end(); ++Prev, ++Next) {
3261 Value *LHS = Next->Invariant;
3262 Value *RHS = Prev->Invariant;
3263 BasicBlock *InLoopSucc = Prev->InLoopSucc;
3264 InjectedInvariant ToInject(NonStrictPred, LHS, RHS, InLoopSucc);
3265 NonTrivialUnswitchCandidate Candidate(Prev->Term, { LHS, RHS },
3266 std::nullopt, std::move(ToInject));
3267 UnswitchCandidates.push_back(std::move(Candidate));
3268 }
// At least one candidate was queued.
3269 return true;
3270 }
3271
3272 /// Collect unswitch candidates by invariant conditions that are not immediately
3273 /// present in the loop. However, they can be injected into the code if we
3274 /// decide it's profitable.
3275 /// An example of such conditions is following:
3276 ///
3277 /// for (...) {
3278 /// x = load ...
3279 /// if (! x <u C1) break;
3280 /// if (! x <u C2) break;
3281 /// <do something>
3282 /// }
3283 ///
3284 /// We can unswitch by condition "C1 <=u C2". If that is true, then "x <u C1 <=
3285 /// C2" automatically implies "x <u C2", so we can get rid of one of
3286 /// loop-variant checks in unswitched loop version.
// NOTE(review): lines 3287-3288 (function signature) and 3292 (presumably an
// early-exit guard, e.g. an option check) were dropped by the rendering.
3289 IVConditionInfo &PartialIVInfo, Instruction *&PartialIVCondBranch, Loop &L,
3290 const DominatorTree &DT, const LoopInfo &LI, AAResults &AA,
3291 const MemorySSAUpdater *MSSAU) {
3293 return false;
3294
// Unreachable loops have no meaningful dominance structure to walk.
3295 if (!DT.isReachableFromEntry(L.getHeader()))
3296 return false;
3297 auto *Latch = L.getLoopLatch();
3298 // Need to have a single latch and a preheader.
3299 if (!Latch)
3300 return false;
3301 assert(L.getLoopPreheader() && "Must have a preheader!");
3302
// NOTE(review): line 3303 (the declaration of the CandidatesULT map used
// below) was dropped by the rendering.
3304 // Traverse the conditions that dominate latch (and therefore dominate each
3305 // other).
// Walk up the dominator tree from the latch while still inside this loop;
// every block on this path dominates the latch.
3306 for (auto *DTN = DT.getNode(Latch); L.contains(DTN->getBlock());
3307 DTN = DTN->getIDom()) {
3308 CmpPredicate Pred;
3309 Value *LHS = nullptr, *RHS = nullptr;
3310 BasicBlock *IfTrue = nullptr, *IfFalse = nullptr;
3311 auto *BB = DTN->getBlock();
3312 // Ignore inner loops.
3313 if (LI.getLoopFor(BB) != &L)
3314 continue;
3315 auto *Term = BB->getTerminator();
// Only conditional branches on an integer icmp are candidates.
3316 if (!match(Term, m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)),
3317 m_BasicBlock(IfTrue), m_BasicBlock(IfFalse))))
3318 continue;
3319 if (!LHS->getType()->isIntegerTy())
3320 continue;
// Normalize (Pred, LHS, RHS, successors) into the canonical "variant <u
// invariant" form expected by the profitability check below.
3321 canonicalizeForInvariantConditionInjection(Pred, LHS, RHS, IfTrue, IfFalse,
3322 L);
3323 if (!shouldTryInjectInvariantCondition(Pred, LHS, RHS, IfTrue, IfFalse, L))
3324 continue;
// NOTE(review): line 3325 (the condition guarding this continue, presumably
// a metadata-based profitability check) was dropped by the rendering.
3326 continue;
3327 // Strip ZEXT for unsigned predicate.
3328 // TODO: once signed predicates are supported, also strip SEXT.
3329 CompareDesc Desc(cast<CondBrInst>(Term), RHS, IfTrue);
3330 while (auto *Zext = dyn_cast<ZExtInst>(LHS))
3331 LHS = Zext->getOperand(0);
// Group comparisons by their (zext-stripped) loop-variant LHS so chains
// against the same value can be related pairwise.
3332 CandidatesULT[LHS].push_back(Desc);
3333 }
3334
3335 bool Found = false;
// For each chain keyed by the same variant value, queue pairwise injected
// invariants. NOTE(review): line 3337 (the call assigning into Found) was
// dropped by the rendering.
3336 for (auto &It : CandidatesULT)
3338 UnswitchCandidates, L, ICmpInst::ICMP_ULT, It.second, DT);
3339 return Found;
3340 }
3341
// NOTE(review): line 3342 (the signature of isSafeForNoNTrivialUnswitching)
// was dropped by the rendering; the body below is kept verbatim.
3343 if (!L.isSafeToClone())
3344 return false;
// Token-typed values cannot be cloned across blocks, and convergent calls
// must not have their control dependencies changed, so either one blocks
// non-trivial unswitching.
3345 for (auto *BB : L.blocks())
3346 for (auto &I : *BB) {
3347 if (I.getType()->isTokenTy() && I.isUsedOutsideOfBlock(BB))
3348 return false;
3349 if (auto *CB = dyn_cast<CallBase>(&I)) {
3350 assert(!CB->cannotDuplicate() && "Checked by L.isSafeToClone().");
3351 if (CB->isConvergent())
3352 return false;
3353 }
3354 }
3355
3356 // Check if there are irreducible CFG cycles in this loop. If so, we cannot
3357 // easily unswitch non-trivial edges out of the loop. Doing so might turn the
3358 // irreducible control flow into reducible control flow and introduce new
3359 // loops "out of thin air". If we ever discover important use cases for doing
3360 // this, we can add support to loop unswitch, but it is a lot of complexity
3361 // for what seems little or no real world benefit.
3362 LoopBlocksRPO RPOT(&L);
3363 RPOT.perform(&LI);
// NOTE(review): line 3364 (the irreducible-CFG check condition guarding this
// return) was dropped by the rendering.
3365 return false;
3366
// NOTE(review): line 3367 (the ExitBlocks vector declaration) was dropped by
// the rendering.
3368 L.getUniqueExitBlocks(ExitBlocks);
3369 // We cannot unswitch if exit blocks contain a cleanuppad/catchswitch
3370 // instruction as we don't know how to split those exit blocks.
3371 // FIXME: We should teach SplitBlock to handle this and remove this
3372 // restriction.
3373 for (auto *ExitBB : ExitBlocks) {
3374 auto It = ExitBB->getFirstNonPHIIt();
// NOTE(review): line 3375 (the cleanuppad/catchswitch isa<> check on It) was
// dropped by the rendering.
3376 LLVM_DEBUG(dbgs() << "Cannot unswitch because of cleanuppad/catchswitch "
3377 "in exit block\n");
3378 return false;
3379 }
3380 }
3381
// All legality checks passed.
3382 return true;
3383 }
3384
// Rank all collected unswitch candidates by estimated duplicated-code cost
// and return the cheapest one. Several declaration lines were dropped by the
// rendering (3394, 3396, 3403, 3405-3406, 3436, 3446, 3504, 3512); the code
// below is kept verbatim.
3385 static NonTrivialUnswitchCandidate findBestNonTrivialUnswitchCandidate(
3386 ArrayRef<NonTrivialUnswitchCandidate> UnswitchCandidates, const Loop &L,
3387 const DominatorTree &DT, const LoopInfo &LI, AssumptionCache &AC,
3388 const TargetTransformInfo &TTI, const IVConditionInfo &PartialIVInfo) {
3389 // Given that unswitching these terminators will require duplicating parts of
3390 // the loop, so we need to be able to model that cost. Compute the ephemeral
3391 // values and set up a data structure to hold per-BB costs. We cache each
3392 // block's cost so that we don't recompute this when considering different
3393 // subsets of the loop for duplication during unswitching.
3395 CodeMetrics::collectEphemeralValues(&L, &AC, EphValues);
3397
3398 // Compute the cost of each block, as well as the total loop cost. Also, bail
3399 // out if we see instructions which are incompatible with loop unswitching
3400 // (convergent, noduplicate, or cross-basic-block tokens).
3401 // FIXME: We might be able to safely handle some of these in non-duplicated
3402 // regions.
// NOTE(review): line 3403 selected the TTI cost kind; minsize functions use a
// different kind than the default (lines 3405-3406 also dropped).
3404 L.getHeader()->getParent()->hasMinSize()
3407 InstructionCost LoopCost = 0;
// Sum per-block instruction costs, skipping ephemeral values (values only
// feeding assumes), and cache each block's cost in BBCostMap.
3408 for (auto *BB : L.blocks()) {
3409 InstructionCost Cost = 0;
3410 for (auto &I : *BB) {
3411 if (EphValues.count(&I))
3412 continue;
3413 Cost += TTI.getInstructionCost(&I, CostKind);
3414 }
3415 assert(Cost >= 0 && "Must not have negative costs!");
3416 LoopCost += Cost;
3417 assert(LoopCost >= 0 && "Must not have negative loop costs!");
3418 BBCostMap[BB] = Cost;
3419 }
3420 LLVM_DEBUG(dbgs() << " Total loop cost: " << LoopCost << "\n");
3421
3422 // Now we find the best candidate by searching for the one with the following
3423 // properties in order:
3424 //
3425 // 1) An unswitching cost below the threshold
3426 // 2) The smallest number of duplicated unswitch candidates (to avoid
3427 // creating redundant subsequent unswitching)
3428 // 3) The smallest cost after unswitching.
3429 //
3430 // We prioritize reducing fanout of unswitch candidates provided the cost
3431 // remains below the threshold because this has a multiplicative effect.
3432 //
3433 // This requires memoizing each dominator subtree to avoid redundant work.
3434 //
3435 // FIXME: Need to actually do the number of candidates part above.
3437 // Given a terminator which might be unswitched, computes the non-duplicated
3438 // cost for that terminator.
3439 auto ComputeUnswitchedCost = [&](Instruction &TI,
3440 bool FullUnswitch) -> InstructionCost {
3441 // Unswitching selects unswitches the entire loop.
3442 if (isa<SelectInst>(TI))
3443 return LoopCost;
3444
3445 BasicBlock &BB = *TI.getParent();
3447
3448 InstructionCost Cost = 0;
3449 for (BasicBlock *SuccBB : successors(&BB)) {
3450 // Don't count successors more than once.
3451 if (!Visited.insert(SuccBB).second)
3452 continue;
3453
3454 // If this is a partial unswitch candidate, then it must be a conditional
3455 // branch with a condition of either `or`, `and`, their corresponding
3456 // select forms or partially invariant instructions. In that case, one of
3457 // the successors is necessarily duplicated, so don't even try to remove
3458 // its cost.
3459 if (!FullUnswitch) {
3460 auto &BI = cast<CondBrInst>(TI);
3461 Value *Cond = skipTrivialSelect(BI.getCondition());
3462 if (match(Cond, m_LogicalAnd())) {
3463 if (SuccBB == BI.getSuccessor(1))
3464 continue;
3465 } else if (match(Cond, m_LogicalOr())) {
3466 if (SuccBB == BI.getSuccessor(0))
3467 continue;
3468 } else if ((PartialIVInfo.KnownValue->isOneValue() &&
3469 SuccBB == BI.getSuccessor(0)) ||
3470 (!PartialIVInfo.KnownValue->isOneValue() &&
3471 SuccBB == BI.getSuccessor(1)))
3472 continue;
3473 }
3474
3475 // This successor's domtree will not need to be duplicated after
3476 // unswitching if the edge to the successor dominates it (and thus the
3477 // entire tree). This essentially means there is no other path into this
3478 // subtree and so it will end up live in only one clone of the loop.
3479 if (SuccBB->getUniquePredecessor() ||
3480 llvm::all_of(predecessors(SuccBB), [&](BasicBlock *PredBB) {
3481 return PredBB == &BB || DT.dominates(SuccBB, PredBB);
3482 })) {
3483 Cost += computeDomSubtreeCost(*DT[SuccBB], BBCostMap, DTCostMap);
3484 assert(Cost <= LoopCost &&
3485 "Non-duplicated cost should never exceed total loop cost!");
3486 }
3487 }
3488
3489 // Now scale the cost by the number of unique successors minus one. We
3490 // subtract one because there is already at least one copy of the entire
3491 // loop. This is computing the new cost of unswitching a condition.
3492 // Note that guards always have 2 unique successors that are implicit and
3493 // will be materialized if we decide to unswitch it.
3494 int SuccessorsCount = isGuard(&TI) ? 2 : Visited.size();
3495 assert(SuccessorsCount > 1 &&
3496 "Cannot unswitch a condition without multiple distinct successors!");
3497 return (LoopCost - Cost) * (SuccessorsCount - 1);
3498 };
3499
// Linear scan over all candidates keeping the cheapest one seen so far.
3500 std::optional<NonTrivialUnswitchCandidate> Best;
3501 for (auto &Candidate : UnswitchCandidates) {
3502 Instruction &TI = *Candidate.TI;
3503 ArrayRef<Value *> Invariants = Candidate.Invariants;
// NOTE(review): line 3504 (defining BI from TI) was dropped by the rendering.
// A candidate is a "full" unswitch if it has a pending injection or its sole
// invariant is the branch condition itself.
3505 bool FullUnswitch =
3506 !BI || Candidate.hasPendingInjection() ||
3507 (Invariants.size() == 1 &&
3508 Invariants[0] == skipTrivialSelect(BI->getCondition()));
3509 InstructionCost CandidateCost = ComputeUnswitchedCost(TI, FullUnswitch);
3510 // Calculate cost multiplier which is a tool to limit potentially
3511 // exponential behavior of loop-unswitch.
// NOTE(review): line 3512 (the option check guarding the multiplier branch)
// was dropped by the rendering.
3513 int CostMultiplier =
3514 CalculateUnswitchCostMultiplier(TI, L, LI, DT, UnswitchCandidates);
3515 assert(
3516 (CostMultiplier > 0 && CostMultiplier <= UnswitchThreshold) &&
3517 "cost multiplier needs to be in the range of 1..UnswitchThreshold");
3518 CandidateCost *= CostMultiplier;
3519 LLVM_DEBUG(dbgs() << " Computed cost of " << CandidateCost
3520 << " (multiplier: " << CostMultiplier << ")"
3521 << " for unswitch candidate: " << TI << "\n");
3522 } else {
3523 LLVM_DEBUG(dbgs() << " Computed cost of " << CandidateCost
3524 << " for unswitch candidate: " << TI << "\n");
3525 }
3526
3527 if (!Best || CandidateCost < Best->Cost) {
3528 Best = Candidate;
3529 Best->Cost = CandidateCost;
3530 }
3531 }
// Callers only invoke this with a non-empty candidate list.
3532 assert(Best && "Must be!");
3533 return *Best;
3534 }
3535
3536 // Insert a freeze on an unswitched branch if all is true:
3537 // 1. freeze-loop-unswitch-cond option is true
3538 // 2. The branch may not execute in the loop pre-transformation. If a branch may
3539 // not execute and could cause UB, it would always cause UB if it is hoisted outside
3540 // of the loop. Insert a freeze to prevent this case.
3541 // 3. The branch condition may be poison or undef
// NOTE(review): line 3542 (the signature of shouldInsertFreeze) and lines
// 3544-3545 (the freeze-loop-unswitch-cond option check guarding the first
// return) were dropped by the rendering.
3543 AssumptionCache &AC) {
3546 return false;
3547
// If the terminator is guaranteed to execute on every loop iteration, hoisting
// its condition cannot introduce new UB, so no freeze is needed.
3548 ICFLoopSafetyInfo SafetyInfo;
3549 SafetyInfo.computeLoopSafetyInfo(&L);
3550 if (SafetyInfo.isGuaranteedToExecute(TI, &DT, &L))
3551 return false;
3552
// Extract the condition (skipping trivial selects for branches).
3553 Value *Cond;
3554 if (CondBrInst *BI = dyn_cast<CondBrInst>(&TI))
3555 Cond = skipTrivialSelect(BI->getCondition());
3556 else
// NOTE(review): lines 3557-3558 (the non-branch condition extraction and the
// return with the isGuaranteedNotToBeUndefOrPoison call whose arguments
// continue below) were dropped by the rendering.
3559 Cond, &AC, L.getLoopPreheader()->getTerminator(), &DT);
3560 }
3561
// NOTE(review): lines 3562-3564 (the signature of unswitchBestCondition) and
// line 3569 (the UnswitchCandidates vector declaration) were dropped by the
// rendering; code below is kept verbatim.
3565 MemorySSAUpdater *MSSAU,
3566 LPMUpdater &LoopUpdater) {
3567 // Collect all invariant conditions within this loop (as opposed to an inner
3568 // loop which would be handled when visiting that inner loop).
3570 IVConditionInfo PartialIVInfo;
3571 Instruction *PartialIVCondBranch = nullptr;
3572 collectUnswitchCandidates(UnswitchCandidates, PartialIVInfo,
3573 PartialIVCondBranch, L, LI, AA, MSSAU);
// Injection-based candidates are collected only when not disabled via loop
// metadata.
3574 if (!findOptionMDForLoop(&L, "llvm.loop.unswitch.injection.disable"))
3575 collectUnswitchCandidatesWithInjections(UnswitchCandidates, PartialIVInfo,
3576 PartialIVCondBranch, L, DT, LI, AA,
3577 MSSAU);
3578 // If we didn't find any candidates, we're done.
3579 if (UnswitchCandidates.empty())
3580 return false;
3581
3582 LLVM_DEBUG(
3583 dbgs() << "Considering " << UnswitchCandidates.size()
3584 << " non-trivial loop invariant conditions for unswitching.\n");
3585
3586 NonTrivialUnswitchCandidate Best = findBestNonTrivialUnswitchCandidate(
3587 UnswitchCandidates, L, DT, LI, AC, TTI, PartialIVInfo);
3588
3589 assert(Best.TI && "Failed to find loop unswitch candidate");
3590 assert(Best.Cost && "Failed to compute cost");
3591
// Give up if even the cheapest candidate exceeds the threshold.
3592 if (*Best.Cost >= UnswitchThreshold) {
3593 LLVM_DEBUG(dbgs() << "Cannot unswitch, lowest cost found: " << *Best.Cost
3594 << "\n");
3595 return false;
3596 }
3597
// Materialize a pending injected invariant condition into IR, if any.
3598 bool InjectedCondition = false;
3599 if (Best.hasPendingInjection()) {
3600 Best = injectPendingInvariantConditions(Best, L, DT, LI, AC, MSSAU);
3601 InjectedCondition = true;
3602 }
3603 assert(!Best.hasPendingInjection() &&
3604 "All injections should have been done by now!");
3605
// Partial-IV duplication info only applies when the chosen terminator is the
// partial-IV branch itself.
3606 if (Best.TI != PartialIVCondBranch)
3607 PartialIVInfo.InstToDuplicate.clear();
3608
3609 bool InsertFreeze;
3610 if (auto *SI = dyn_cast<SelectInst>(Best.TI)) {
3611 // If the best candidate is a select, turn it into a branch. Select
3612 // instructions with a poison conditional do not propagate poison, but
3613 // branching on poison causes UB. Insert a freeze on the select
3614 // conditional to prevent UB after turning the select into a branch.
3615 InsertFreeze = !isGuaranteedNotToBeUndefOrPoison(
3616 SI->getCondition(), &AC, L.getLoopPreheader()->getTerminator(), &DT);
3617 Best.TI = turnSelectIntoBranch(SI, DT, LI, MSSAU, &AC);
3618 } else {
3619 // If the best candidate is a guard, turn it into a branch.
3620 if (isGuard(Best.TI))
3621 Best.TI =
3622 turnGuardIntoBranch(cast<IntrinsicInst>(Best.TI), L, DT, LI, MSSAU);
3623 InsertFreeze = shouldInsertFreeze(L, *Best.TI, DT, AC);
3624 }
3625
3626 LLVM_DEBUG(dbgs() << " Unswitching non-trivial (cost = " << Best.Cost
3627 << ") terminator: " << *Best.TI << "\n");
// Perform the actual transformation on the chosen terminator.
3628 unswitchNontrivialInvariants(L, *Best.TI, Best.Invariants, PartialIVInfo, DT,
3629 LI, AC, SE, MSSAU, LoopUpdater, InsertFreeze,
3630 InjectedCondition);
3631 return true;
3632 }
3633
3634 /// Unswitch control flow predicated on loop invariant conditions.
3635 ///
3636 /// This first hoists all branches or switches which are trivial (IE, do not
3637 /// require duplicating any part of the loop) out of the loop body. It then
3638 /// looks at other loop invariant control flows and tries to unswitch those as
3639 /// well by cloning the loop if the result is small enough.
3640 ///
3641 /// The `DT`, `LI`, `AC`, `AA`, `TTI` parameters are required analyses that are
3642 /// also updated based on the unswitch. The `MSSA` analysis is also updated if
3643 /// valid (i.e. its use is enabled).
3644 ///
3645 /// If either `NonTrivial` is true or the flag `EnableNonTrivialUnswitch` is
3646 /// true, we will attempt to do non-trivial unswitching as well as trivial
3647 /// unswitching.
3648 ///
3649 /// The `postUnswitch` function will be run after unswitching is complete
3650 /// with information on whether or not the provided loop remains a loop and
3651 /// a list of new sibling loops created.
3652 ///
3653 /// If `SE` is non-null, we will update that analysis based on the unswitching
3654 /// done.
// NOTE(review): line 3656 (the AC/AA parameters of the signature) was dropped
// by the rendering.
3655 static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI,
3657 TargetTransformInfo &TTI, bool Trivial,
3658 bool NonTrivial, ScalarEvolution *SE,
3659 MemorySSAUpdater *MSSAU, LPMUpdater &LoopUpdater) {
3660 assert(L.isRecursivelyLCSSAForm(DT, LI) &&
3661 "Loops must be in LCSSA form before unswitching.");
3662
3663 // Must be in loop simplified form: we need a preheader and dedicated exits.
3664 if (!L.isLoopSimplifyForm())
3665 return false;
3666
3667 // Try trivial unswitch first before loop over other basic blocks in the loop.
3668 if (Trivial && unswitchAllTrivialConditions(L, DT, LI, SE, MSSAU)) {
3669 // If we unswitched successfully we will want to clean up the loop before
3670 // processing it further so just mark it as unswitched and return.
3671 postUnswitch(L, LoopUpdater, L.getName(),
3672 /*CurrentLoopValid*/ true, /*PartiallyInvariant*/ false,
3673 /*InjectedCondition*/ false, {});
3674 return true;
3675 }
3676
3677 const Function *F = L.getHeader()->getParent();
3678
3679 // Check whether we should continue with non-trivial conditions.
3680 // EnableNonTrivialUnswitch: Global variable that forces non-trivial
3681 // unswitching for testing and debugging.
3682 // NonTrivial: Parameter that enables non-trivial unswitching for this
3683 // invocation of the transform. But this should be allowed only
3684 // for targets without branch divergence.
3685 //
3686 // FIXME: If divergence analysis becomes available to a loop
3687 // transform, we should allow unswitching for non-trivial uniform
3688 // branches even on targets that have divergence.
3689 // https://bugs.llvm.org/show_bug.cgi?id=48819
3690 bool ContinueWithNonTrivial =
3691 EnableNonTrivialUnswitch || (NonTrivial && !TTI.hasBranchDivergence(F));
3692 if (!ContinueWithNonTrivial)
3693 return false;
3694
3695 // Skip non-trivial unswitching for optsize functions.
3696 if (F->hasOptSize())
3697 return false;
3698
3699 // Perform legality checks.
// NOTE(review): line 3700 (the legality-check call guarding this return) was
// dropped by the rendering.
3701 return false;
3702
3703 // For non-trivial unswitching, because it often creates new loops, we rely on
3704 // the pass manager to iterate on the loops rather than trying to immediately
3705 // reach a fixed point. There is no substantial advantage to iterating
3706 // internally, and if any of the new loops are simplified enough to contain
3707 // trivial unswitching we want to prefer those.
3708
3709 // Try to unswitch the best invariant condition. We prefer this full unswitch to
3710 // a partial unswitch when possible below the threshold.
3711 if (unswitchBestCondition(L, DT, LI, AC, AA, TTI, SE, MSSAU, LoopUpdater))
3712 return true;
3713
3714 // No other opportunities to unswitch.
3715 return false;
3716 }
3717
// NOTE(review): lines 3718-3719 (the signature of SimpleLoopUnswitchPass::run)
// were dropped by the rendering; the body below is kept verbatim.
3720 LPMUpdater &U) {
3721 Function &F = *L.getHeader()->getParent();
// F is only used inside LLVM_DEBUG; the cast silences unused-variable
// warnings in release builds.
3722 (void)F;
3723 LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L
3724 << "\n");
3725
// Wrap MemorySSA in an updater only when the analysis is available, verifying
// it up front when requested.
3726 std::optional<MemorySSAUpdater> MSSAU;
3727 if (AR.MSSA) {
3728 MSSAU = MemorySSAUpdater(AR.MSSA);
3729 if (VerifyMemorySSA)
3730 AR.MSSA->verifyMemorySSA();
3731 }
// If nothing was unswitched, all analyses are preserved.
3732 if (!unswitchLoop(L, AR.DT, AR.LI, AR.AC, AR.AA, AR.TTI, Trivial, NonTrivial,
3733 &AR.SE, MSSAU ? &*MSSAU : nullptr, U))
3734 return PreservedAnalyses::all();
3735
3736 if (AR.MSSA && VerifyMemorySSA)
3737 AR.MSSA->verifyMemorySSA();
3738
3739#ifdef EXPENSIVE_CHECKS
3740 // Historically this pass has had issues with the dominator tree so verify it
3741 // in asserts builds.
3742 assert(AR.DT.verify(DominatorTree::VerificationLevel::Fast));
3743#endif
3744
// Preserve the standard loop-pass analyses, plus MemorySSA when present.
3745 auto PA = getLoopPassPreservedAnalyses();
3746 if (AR.MSSA)
3747 PA.preserve<MemorySSAAnalysis>();
3748 return PA;
3749 }
3750
// NOTE(review): line 3751 (the signature of
// SimpleLoopUnswitchPass::printPipeline) and line 3753 (the base-class
// printPipeline call whose arguments continue below) were dropped by the
// rendering.
3752 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
3754 OS, MapClassName2PassName);
3755
// Emit the pass options in the textual pipeline format, e.g.
// "<nontrivial;trivial>" or "<no-nontrivial;no-trivial>".
3756 OS << '<';
3757 OS << (NonTrivial ? "" : "no-") << "nontrivial;";
3758 OS << (Trivial ? "" : "no-") << "trivial";
3759 OS << '>';
3760 }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
This file defines the DenseMap class.
#define DEBUG_TYPE
This file defines a set of templates that efficiently compute a dominator tree over a generic graph.
static Value * getCondition(Instruction *I)
Module.h This file contains the declarations for the Module class.
This defines the Use class.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
This header provides classes for managing per-loop analyses.
Loop::LoopBounds::Direction Direction
Definition LoopInfo.cpp:253
This header provides classes for managing a pipeline of passes over loops in LLVM IR.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
#define T
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
uint64_t IntrinsicInst * II
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
Provides some synthesis utilities to produce sequences of values.
This file implements a set that has insertion order iteration characteristics.
static void rewritePHINodesForUnswitchedExitBlock(BasicBlock &UnswitchedBB, BasicBlock &OldExitingBB, BasicBlock &OldPH)
Rewrite the PHI nodes in an unswitched loop exit basic block.
static bool unswitchAllTrivialConditions(Loop &L, DominatorTree &DT, LoopInfo &LI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU)
This routine scans the loop to find a branch or switch which occurs before any side effects occur.
static SmallPtrSet< const BasicBlock *, 16 > recomputeLoopBlockSet(Loop &L, LoopInfo &LI)
Recompute the set of blocks in a loop after unswitching.
static int CalculateUnswitchCostMultiplier(const Instruction &TI, const Loop &L, const LoopInfo &LI, const DominatorTree &DT, ArrayRef< NonTrivialUnswitchCandidate > UnswitchCandidates)
Cost multiplier is a way to limit potentially exponential behavior of loop-unswitch.
static TinyPtrVector< Value * > collectHomogenousInstGraphLoopInvariants(const Loop &L, Instruction &Root, const LoopInfo &LI)
Collect all of the loop invariant input values transitively used by the homogeneous instruction graph...
static void deleteDeadClonedBlocks(Loop &L, ArrayRef< BasicBlock * > ExitBlocks, ArrayRef< std::unique_ptr< ValueToValueMapTy > > VMaps, DominatorTree &DT, MemorySSAUpdater *MSSAU)
void visitDomSubTree(DominatorTree &DT, BasicBlock *BB, CallableT Callable)
Helper to visit a dominator subtree, invoking a callable on each node.
static bool isSafeForNoNTrivialUnswitching(Loop &L, LoopInfo &LI)
void postUnswitch(Loop &L, LPMUpdater &U, StringRef LoopName, bool CurrentLoopValid, bool PartiallyInvariant, bool InjectedCondition, ArrayRef< Loop * > NewLoops)
static bool shouldTryInjectInvariantCondition(const ICmpInst::Predicate Pred, const Value *LHS, const Value *RHS, const BasicBlock *IfTrue, const BasicBlock *IfFalse, const Loop &L)
Returns true, if predicate described by ( Pred, LHS, RHS ) succeeding into blocks ( IfTrue,...
static NonTrivialUnswitchCandidate findBestNonTrivialUnswitchCandidate(ArrayRef< NonTrivialUnswitchCandidate > UnswitchCandidates, const Loop &L, const DominatorTree &DT, const LoopInfo &LI, AssumptionCache &AC, const TargetTransformInfo &TTI, const IVConditionInfo &PartialIVInfo)
static void buildPartialInvariantUnswitchConditionalBranch(BasicBlock &BB, ArrayRef< Value * > ToDuplicate, bool Direction, BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, Loop &L, MemorySSAUpdater *MSSAU, const CondBrInst &OriginalBranch)
Copy a set of loop invariant values, and conditionally branch on them.
static Value * skipTrivialSelect(Value *Cond)
static Loop * getTopMostExitingLoop(const BasicBlock *ExitBB, const LoopInfo &LI)
static bool collectUnswitchCandidatesWithInjections(SmallVectorImpl< NonTrivialUnswitchCandidate > &UnswitchCandidates, IVConditionInfo &PartialIVInfo, Instruction *&PartialIVCondBranch, Loop &L, const DominatorTree &DT, const LoopInfo &LI, AAResults &AA, const MemorySSAUpdater *MSSAU)
Collect unswitch candidates by invariant conditions that are not immediately present in the loop.
static void replaceLoopInvariantUses(const Loop &L, Value *Invariant, Constant &Replacement)
static CondBrInst * turnGuardIntoBranch(IntrinsicInst *GI, Loop &L, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU)
Turns a llvm.experimental.guard intrinsic into implicit control flow branch, making the following rep...
static bool collectUnswitchCandidates(SmallVectorImpl< NonTrivialUnswitchCandidate > &UnswitchCandidates, IVConditionInfo &PartialIVInfo, Instruction *&PartialIVCondBranch, const Loop &L, const LoopInfo &LI, AAResults &AA, const MemorySSAUpdater *MSSAU)
static InstructionCost computeDomSubtreeCost(DomTreeNode &N, const SmallDenseMap< BasicBlock *, InstructionCost, 4 > &BBCostMap, SmallDenseMap< DomTreeNode *, InstructionCost, 4 > &DTCostMap)
Recursively compute the cost of a dominator subtree based on the per-block cost map provided.
static bool shouldInsertFreeze(Loop &L, Instruction &TI, DominatorTree &DT, AssumptionCache &AC)
bool shouldTryInjectBasingOnMetadata(const CondBrInst *BI, const BasicBlock *TakenSucc)
Returns true, if metadata on BI allows us to optimize branching into TakenSucc via injection of invar...
static void canonicalizeForInvariantConditionInjection(CmpPredicate &Pred, Value *&LHS, Value *&RHS, BasicBlock *&IfTrue, BasicBlock *&IfFalse, const Loop &L)
Tries to canonicalize condition described by:
static bool areLoopExitPHIsLoopInvariant(const Loop &L, const BasicBlock &ExitingBB, const BasicBlock &ExitBB)
Check that all the LCSSA PHI nodes in the loop exit block have trivial incoming values along this edg...
static void rewritePHINodesForExitAndUnswitchedBlocks(BasicBlock &ExitBB, BasicBlock &UnswitchedBB, BasicBlock &OldExitingBB, BasicBlock &OldPH, bool FullUnswitch)
Rewrite the PHI nodes in the loop exit basic block and the split off unswitched block.
static bool insertCandidatesWithPendingInjections(SmallVectorImpl< NonTrivialUnswitchCandidate > &UnswitchCandidates, Loop &L, ICmpInst::Predicate Pred, ArrayRef< CompareDesc > Compares, const DominatorTree &DT)
Given chain of loop branch conditions looking like: br (Variant < Invariant1) br (Variant < Invariant...
static NonTrivialUnswitchCandidate injectPendingInvariantConditions(NonTrivialUnswitchCandidate Candidate, Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, MemorySSAUpdater *MSSAU)
Materialize pending invariant condition of the given candidate into IR.
static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT, LoopInfo &LI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU)
Unswitch a trivial switch if the condition is loop invariant.
static void unswitchNontrivialInvariants(Loop &L, Instruction &TI, ArrayRef< Value * > Invariants, IVConditionInfo &PartialIVInfo, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, ScalarEvolution *SE, MemorySSAUpdater *MSSAU, LPMUpdater &LoopUpdater, bool InsertFreeze, bool InjectedCondition)
static bool rebuildLoopAfterUnswitch(Loop &L, ArrayRef< BasicBlock * > ExitBlocks, LoopInfo &LI, SmallVectorImpl< Loop * > &HoistedLoops, ScalarEvolution *SE)
Rebuild a loop after unswitching removes some subset of blocks and edges.
static CondBrInst * turnSelectIntoBranch(SelectInst *SI, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU, AssumptionCache *AC)
Turns a select instruction into implicit control flow branch, making the following replacement:
static bool unswitchBestCondition(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, AAResults &AA, TargetTransformInfo &TTI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU, LPMUpdater &LoopUpdater)
static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, AAResults &AA, TargetTransformInfo &TTI, bool Trivial, bool NonTrivial, ScalarEvolution *SE, MemorySSAUpdater *MSSAU, LPMUpdater &LoopUpdater)
Unswitch control flow predicated on loop invariant conditions.
static bool unswitchTrivialBranch(Loop &L, CondBrInst &BI, DominatorTree &DT, LoopInfo &LI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU)
Unswitch a trivial branch if the condition is loop invariant.
static BasicBlock * buildClonedLoopBlocks(Loop &L, BasicBlock *LoopPH, BasicBlock *SplitBB, ArrayRef< BasicBlock * > ExitBlocks, BasicBlock *ParentBB, BasicBlock *UnswitchedSuccBB, BasicBlock *ContinueSuccBB, const SmallDenseMap< BasicBlock *, BasicBlock *, 16 > &DominatingSucc, ValueToValueMapTy &VMap, SmallVectorImpl< DominatorTree::UpdateType > &DTUpdates, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU, ScalarEvolution *SE)
Build the cloned blocks for an unswitched copy of the given loop.
static void deleteDeadBlocksFromLoop(Loop &L, SmallVectorImpl< BasicBlock * > &ExitBlocks, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU, ScalarEvolution *SE, LPMUpdater &LoopUpdater)
static void buildPartialUnswitchConditionalBranch(BasicBlock &BB, ArrayRef< Value * > Invariants, bool Direction, BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, bool InsertFreeze, const Instruction *I, AssumptionCache *AC, const DominatorTree &DT, const CondBrInst &ComputeProfFrom)
Copy a set of loop invariant values Invariants and insert them at the end of BB and conditionally bra...
static Loop * cloneLoopNest(Loop &OrigRootL, Loop *RootParentL, const ValueToValueMapTy &VMap, LoopInfo &LI)
Recursively clone the specified loop and all of its children.
static void hoistLoopToNewParent(Loop &L, BasicBlock &Preheader, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU, ScalarEvolution *SE)
Hoist the current loop up to the innermost loop containing a remaining exit.
static void buildClonedLoops(Loop &OrigL, ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VMap, LoopInfo &LI, SmallVectorImpl< Loop * > &NonChildClonedLoops)
Build the cloned loops of an original loop from unswitching.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
Value * RHS
Value * LHS
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:474
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
const Instruction * getTerminatorOrNull() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition BasicBlock.h:248
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
size_t size() const
Definition BasicBlock.h:482
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives in, right before MovePos.
Definition BasicBlock.h:388
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
bool isUnsigned() const
Definition InstrTypes.h:936
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
Conditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void setCondition(Value *V)
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI bool isOneValue() const
Returns true if the value is one.
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getCompilerGenerated()
Definition DebugLoc.h:162
static DebugLoc getDropped()
Definition DebugLoc.h:163
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator begin()
Definition DenseMap.h:78
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void applyUpdates(ArrayRef< UpdateType > Updates)
Inform the dominator tree about a sequence of CFG edge insertions and deletions and perform a batch u...
void insertEdge(NodeT *From, NodeT *To)
Inform the dominator tree about a CFG edge insertion and update the tree.
static constexpr UpdateKind Delete
static constexpr UpdateKind Insert
void deleteEdge(NodeT *From, NodeT *To)
Inform the dominator tree about a CFG edge deletion and update the tree.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This class represents a freeze function that returns random concrete value if an operand is either a ...
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop: checks loop body & header for the possibility of may-throw exceptions.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition IRBuilder.h:1237
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition IRBuilder.h:2686
void SetCurrentDebugLocation(const DebugLoc &L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1591
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1613
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2847
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI void dropLocation()
Drop the instruction's debug location.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
bool isTerminator() const
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI InstListType::iterator insertInto(BasicBlock *ParentBB, InstListType::iterator It)
Inserts an unlinked instruction into ParentBB at position It and returns the iterator of the inserted...
A wrapper class for inspecting calls to intrinsic functions.
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
void markLoopAsDeleted(Loop &L, llvm::StringRef Name)
Loop passes should use this method to indicate they have deleted a loop from the nest.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBlocks() const
Get the number of blocks in this loop in constant time.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
void reserveBlocks(unsigned size)
interface to do reserve() for Blocks
iterator_range< block_iterator > blocks() const
void addChildLoop(LoopT *NewChild)
Add the specified loop to be a child of this loop.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
bool isLoopExiting(const BlockT *BB) const
True if terminator in the block can branch to another block that is outside of the current loop.
LoopT * removeChildLoop(iterator I)
This removes the specified child from being a subloop of this loop.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
void addTopLevelLoop(LoopT *New)
This adds the specified loop to the collection of top-level loops.
LoopT * AllocateLoop(ArgsTy &&...Args)
LoopT * removeLoop(iterator I)
This removes the specified top-level loop from this loop info object.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
void destroy(LoopT *L)
Destroy a loop that has been removed from the LoopInfo nest.
void changeLoopFor(const BlockT *BB, LoopT *L)
Change the top-level loop that contains BB to the specified loop.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
StringRef getName() const
Definition LoopInfo.h:409
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition MDBuilder.cpp:48
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition MemorySSA.h:371
An analysis that produces MemorySSA for a function.
Definition MemorySSA.h:922
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
LLVM_ABI void removeEdge(BasicBlock *From, BasicBlock *To)
Update the MemoryPhi in To following an edge deletion between From and To.
LLVM_ABI void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks, ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VM, bool IgnoreIncomingWithNoClones=false)
Update MemorySSA after a loop was cloned, given the blocks in RPO order, the exit blocks and a 1:1 ma...
LLVM_ABI void removeDuplicatePhiEdgesBetween(const BasicBlock *From, const BasicBlock *To)
Update the MemoryPhi in To to have a single incoming edge from From, following a CFG change that repl...
LLVM_ABI void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
LLVM_ABI void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was spliced into From and To.
LLVM_ABI MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
LLVM_ABI void applyInsertUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG insert updates, analogous with the DT edge updates.
LLVM_ABI void applyUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT, bool UpdateDTFirst=false)
Apply CFG updates, analogous with the DT edge updates.
LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
LLVM_ABI void updateExitBlocksForClonedLoop(ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VMap, DominatorTree &DT)
Update phi nodes in exit block successors following cloning.
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition MemorySSA.h:702
DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition MemorySSA.h:765
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition MemorySSA.h:720
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
The main scalar evolution driver.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:106
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
unsigned getSuccessorIndex() const
Returns successor index for current case successor.
BasicBlockT * getCaseSuccessor() const
Resolves successor for current case.
ConstantIntT * getCaseValue() const
Resolves case value for current case.
Multiway switch.
BasicBlock * getDefaultDest() const
static SwitchInst * Create(Value *Value, BasicBlock *Default, unsigned NumCases, InsertPosition InsertBefore=nullptr)
void setDefaultDest(BasicBlock *DefaultCase)
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TargetCostKind
The kind of cost model.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
void push_back(EltTy NewVal)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static UncondBrInst * Create(BasicBlock *Target, InsertPosition InsertBefore=nullptr)
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition ValueMap.h:167
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition ValueMap.h:156
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< use_iterator > uses()
Definition Value.h:380
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
LogicalOp_match< LHS, RHS, Instruction::And > m_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R either in the form of L & R or L ? R : false.
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
LogicalOp_match< LHS, RHS, Instruction::Or > m_LogicalOr(const LHS &L, const RHS &R)
Matches L || R either in the form of L | R or L ? true : R.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2115
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1764
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition LoopInfo.cpp:60
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:535
LLVM_ABI BasicBlock * CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap, const Twine &NameSuffix="", Function *F=nullptr, ClonedCodeInfo *CodeInfo=nullptr, bool MapAtoms=true)
Return a copy of the specified basic block, but without embedding the block into a particular functio...
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
static cl::opt< int > UnswitchThreshold("unswitch-threshold", cl::init(50), cl::Hidden, cl::desc("The cost threshold for unswitching a loop."))
auto successors(const MachineBasicBlock *BB)
static cl::opt< bool > EnableNonTrivialUnswitch("enable-nontrivial-unswitch", cl::init(false), cl::Hidden, cl::desc("Forcibly enables non-trivial loop unswitching rather than " "following the configuration passed into the pass."))
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
auto cast_or_null(const Y &Val)
Definition Casting.h:714
LLVM_ABI MDNode * findOptionMDForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for a loop.
Op::Description Desc
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1151
DomTreeNodeBase< BasicBlock > DomTreeNode
Definition Dominators.h:94
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
static cl::opt< bool > EnableUnswitchCostMultiplier("enable-unswitch-cost-multiplier", cl::init(true), cl::Hidden, cl::desc("Enable unswitch cost multiplier that prohibits exponential " "explosion in nontrivial unswitch."))
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
void RemapDbgRecordRange(Module *M, iterator_range< DbgRecordIterator > Range, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataPredicate *IdentityMD=nullptr)
Remap the Values used in the DbgRecords Range using the value map VM.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
static cl::opt< bool > DropNonTrivialImplicitNullChecks("simple-loop-unswitch-drop-non-trivial-implicit-null-checks", cl::init(false), cl::Hidden, cl::desc("If enabled, drop make.implicit metadata in unswitched implicit " "null checks to save time analyzing if we can keep it."))
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
static cl::opt< unsigned > InjectInvariantConditionHotnesThreshold("simple-loop-unswitch-inject-invariant-condition-hotness-threshold", cl::Hidden, cl::desc("Only try to inject loop invariant conditions and " "unswitch on them to eliminate branches that are " "not-taken 1/<this option> times or less."), cl::init(16))
static cl::opt< int > UnswitchSiblingsToplevelDiv("unswitch-siblings-toplevel-div", cl::init(2), cl::Hidden, cl::desc("Toplevel siblings divisor for cost multiplier."))
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
Definition STLExtras.h:853
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
@ RF_IgnoreMissingLocals
If this flag is set, the remapper ignores missing function-local entries (Argument,...
Definition ValueMapper.h:98
@ RF_NoModuleLevelChanges
If this flag is set, the remapper knows that only local values within a function (such as an instruct...
Definition ValueMapper.h:80
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
static cl::opt< bool > InjectInvariantConditions("simple-loop-unswitch-inject-invariant-conditions", cl::Hidden, cl::desc("Whether we should inject new invariants and unswitch them to " "eliminate some existing (non-invariant) conditions."), cl::init(true))
LLVM_ABI bool VerifyLoopInfo
Enable verification of loop info.
Definition LoopInfo.cpp:53
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
TargetTransformInfo TTI
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition MemorySSA.cpp:84
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
LLVM_ABI bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Ensure that all exit blocks of the loop are dedicated exits.
Definition LoopUtils.cpp:61
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataPredicate *IdentityMD=nullptr)
Convert the instruction operands from referencing the current values into those specified by VM.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
auto sum_of(R &&Range, E Init=E{0})
Returns the sum of all values in Range with Init initial value.
Definition STLExtras.h:1716
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
static cl::opt< int > UnswitchNumInitialUnscaledCandidates("unswitch-num-initial-unscaled-candidates", cl::init(8), cl::Hidden, cl::desc("Number of unswitch candidates that are ignored when calculating " "cost multiplier."))
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2018
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
static cl::opt< bool > EstimateProfile("simple-loop-unswitch-estimate-profile", cl::Hidden, cl::init(true))
static cl::opt< unsigned > MSSAThreshold("simple-loop-unswitch-memoryssa-threshold", cl::desc("Max number of memory uses to explore during " "partial unswitching analysis"), cl::init(100), cl::Hidden)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2191
auto predecessors(const MachineBasicBlock *BB)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
LLVM_ABI BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
static cl::opt< bool > FreezeLoopUnswitchCond("freeze-loop-unswitch-cond", cl::init(true), cl::Hidden, cl::desc("If enabled, the freeze instruction will be added to condition " "of loop unswitch to prevent miscompilation."))
LLVM_ABI std::optional< IVConditionInfo > hasPartialIVCondition(const Loop &L, unsigned MSSAThreshold, const MemorySSA &MSSA, AAResults &AA)
Check if the loop header has a conditional branch that is not loop-invariant, because it involves loa...
LLVM_ABI bool formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put loop into LCSSA form.
Definition LCSSA.cpp:427
static cl::opt< bool > UnswitchGuards("simple-loop-unswitch-guards", cl::init(true), cl::Hidden, cl::desc("If enabled, simple loop unswitching will also consider " "llvm.experimental.guard intrinsics as unswitch candidates."))
LLVM_ABI void mapAtomInstance(const DebugLoc &DL, ValueToValueMapTy &VMap)
Mark a cloned instruction as a new instance so that its source loc can be updated when remapped.
static cl::opt< int > UnswitchParentBlocksDiv("unswitch-parent-blocks-div", cl::init(8), cl::Hidden, cl::desc("Outer loop size divisor for cost multiplier."))
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
Struct to hold information about a partially invariant condition.
Definition LoopUtils.h:654
SmallVector< Instruction * > InstToDuplicate
Instructions that need to be duplicated and checked for the unswitching condition.
Definition LoopUtils.h:657
Constant * KnownValue
Constant to indicate for which value the condition is invariant.
Definition LoopUtils.h:660
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70