//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it
// is safe. This pass also promotes must-aliased memory locations in the loop
// to live in registers, thus hoisting and sinking "invariant" loads and
// stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is
// the responsibility of the back-end, which has more accurate information
// about register pressure and also handles optimizations other than LICM
// that increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant.
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in
//     the loop of the pointer to use a temporary alloca'd variable. We then
//     use the SSAUpdater to construct the appropriate SSA form for the value.
//
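// For example (an illustrative sketch, not a test from this file), scalar
// promotion rewrites
//
//   loop:
//     %v = load i32, ptr %p      ; %p loop-invariant, nothing may-aliases it
//     %a = add i32 %v, 1
//     store i32 %a, ptr %p
//     br i1 %c, label %loop, label %exit
//
// into a register-carried recurrence with one store after the loop:
//
//   preheader:
//     %v0 = load i32, ptr %p
//     br label %loop
//   loop:
//     %v = phi i32 [ %v0, %preheader ], [ %a, %loop ]
//     %a = add i32 %v, 1
//     br i1 %c, label %loop, label %exit
//   exit:
//     %a.lcssa = phi i32 [ %a, %loop ]
//     store i32 %a.lcssa, ptr %p
//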
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <utility>
using namespace llvm;

namespace llvm {
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
STATISTIC(NumLoadPromoted, "Number of load-only promotions");
STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
STATISTIC(NumMinMaxHoisted,
          "Number of min/max expressions hoisted out of the loop");
STATISTIC(NumGEPsHoisted,
          "Number of geps reassociated and hoisted out of the loop");
STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                            "and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                    "reassociated and hoisted out of the loop");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<bool>
    SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
                 cl::desc("Force thread model single in LICM pass"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

static cl::opt<unsigned> FPAssociationUpperLimit(
    "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));
150
151// Experimentally, memory promotion carries less importance than sinking and
152// hoisting. Limit when we do promotion when using MemorySSA, in order to save
153// compile time.
155 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
156 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
157 "effect. When MSSA in LICM is enabled, then this is the maximum "
158 "number of accesses allowed to be present in a loop in order to "
159 "enable memory promotion."));
160
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup);
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of loop.
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
using PointersAndHasReadsOutsideSet =
    std::pair<SmallSetVector<Value *, 8>, bool>;
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 AssumptionCache *AC, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE,
                 MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
                 bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    Function *F = L->getHeader()->getParent();

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag*/ false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}
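
// For example, with -print-pipeline-passes this renders as
// "licm<allowspeculation>" (the default) or "licm<no-allowspeculation>".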

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LNICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag*/ false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LNICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }

SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L,
                                             MemorySSA &MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop &L, MemorySSA &MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  unsigned AccessCapCount = 0;
  for (auto *BB : L.getBlocks())
    if (const auto *Accesses = MSSA.getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
                                        DominatorTree *DT, AssumptionCache *AC,
                                        TargetLibraryInfo *TLI,
                                        TargetTransformInfo *TTI,
                                        ScalarEvolution *SE, MemorySSA *MSSA,
                                        OptimizationRemarkEmitter *ORE,
                                        bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed
  // then just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines, however
  // there is currently no general solution for this. Similar issues could also
  // potentially happen in other passes where instructions are being moved
  // across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, *L, *MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not parts
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        LoopNestMode
            ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
                                    TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
            : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                         MSSAU, &SafetyInfo, Flags, ORE);
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
                           MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify,
  // make sure we catch that. An additional load may be generated in the
  // preheader for SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points.
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<BasicBlock::iterator, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop.
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (auto [PointerMustAliases, HasReadsOutsideSet] :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
              LicmAllowSpeculation, HasReadsOutsideSet);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions();
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FoldableInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                    SafetyInfo, TTI, FoldableInLoop,
                                    LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FoldableInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                                 DominatorTree *DT, TargetLibraryInfo *TLI,
                                 TargetTransformInfo *TTI, Loop *CurLoop,
                                 MemorySSAUpdater &MSSAU,
                                 ICFLoopSafetyInfo *SafetyInfo,
                                 SinkAndHoistLICMFlags &Flags,
                                 OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from.
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater &MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater &MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as that's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or if both have a common successor, which we check by seeing if the
    // intersection of their successor sets is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }
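
  // For example (an illustrative CFG sketch): given a branch on a
  // loop-invariant condition %c inside the loop,
  //
  //   bb:    br i1 %c, label %then, label %merge   ; triangle shape
  //   then:  br label %merge
  //   merge: ...                                   ; CommonSucc, dominated by bb
  //
  // the branch is recorded as HoistableBranches[br] = %merge, so that
  // instructions (and phis in %merge) conditional on %c can later be hoisted
  // into a duplicate of this control flow built above the loop.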

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that.
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch.
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader.
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them.
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, AssumptionCache *AC,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              Preheader->getTerminator(), AC, AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
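      // For example (illustrative IR, assuming the 'arcp' fast-math flag):
      //   %q = fdiv arcp float %x, %d        ; %d loop-invariant
      // becomes a hoisted reciprocal plus an in-loop multiply:
      //   preheader: %r = fdiv arcp float 1.0, %d
      //   loop:      %q = fmul arcp float %x, %r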
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Try to reassociate instructions so that part of computations can be
      // done out of loop.
      if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
        Changed = true;
        continue;
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to make sure
  // that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getPointerOperand();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}
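
// For example (illustrative IR), the load below is considered invariant
// because a wide-enough invariant.start with no other uses dominates the loop:
//
//   %inv = call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
//   br label %loop
// loop:
//   %v = load i32, ptr %p        ; 32 bits <= 4 bytes * 8, %inv dominates
//   ...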

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}
/// Return true if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {
  for (auto *BB : L->getBlocks())
    if (MSSAU.getMemorySSA()->getBlockDefs(BB))
      return false;
  return true;
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater &MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
                                               BatchAAResults &BAA,
                                               SinkAndHoistLICMFlags &Flags,
                                               MemoryUseOrDef *MA) {
  // See declaration of SetLicmMssaOptCap for usage details.
  if (Flags.tooManyClobberingCalls())
    return MA->getDefiningAccess();

  MemoryAccess *Source =
      MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA, BAA);
  Flags.incrementClobberingCalls();
  return Source;
}

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, MemorySSAUpdater &MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags &Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU.getMemorySSA();
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));

    bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);

    bool Invalidated = pointerInvalidatedByLoop(
        MSSA, MU, CurLoop, I, Flags, InvariantGroup);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // The convergent attribute is used on operations that involve inter-thread
    // communication whose results are implicitly affected by the enclosing
    // control flow. It is not safe to hoist or sink such operations across
    // control flow.
    if (CI->isConvergent())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    MemoryEffects Behavior = AA->getMemoryEffects(CI);

    // FIXME: we don't handle the semantics of thread locals well: their
    // addresses can appear as fake constants inside coroutines, so for now we
    // refuse to treat onlyReadsMemory calls in coroutines as loop-invariant.
    // Note that it is possible to hide a thread local access in an
    // onlyReadsMemory call. Remove this check after we handle the semantics
    // of thread locals well.
    if (Behavior.onlyReadsMemory() && CI->getFunction()->isPresplitCoroutine())
      return false;

    if (Behavior.doesNotAccessMemory())
      return true;
    if (Behavior.onlyReadsMemory()) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (Behavior.onlyAccessesArgPointees()) {
        // TODO: expand to writeable arguments.
        for (Value *Op : CI->args())
          if (Op->getType()->isPointerTy() &&
              pointerInvalidatedByLoop(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  Flags, /*InvariantGroup=*/false))
            return false;
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
      return true;
    // If there are more accesses than the Promotion cap, then give up as we're
    // not walking a list that long.
    if (Flags.tooManyMemoryAccesses())
      return false;

    auto *SIMD = MSSA->getMemoryAccess(SI);
    BatchAAResults BAA(*AA);
    auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, SIMD);
    // Make sure there are no clobbers inside the loop.
    if (!MSSA->isLiveOnEntryDef(Source) &&
        CurLoop->contains(Source->getBlock()))
      return false;

    // If there are interfering Uses (i.e. their defining access is in the
    // loop), or ordered loads (stored as Defs!), don't move this store.
    // Could do better here, but this is conservatively correct.
    // TODO: Cache set of Uses on the first walk in runOnLoop, update when
    // moving accesses. Can also extend to dominating uses.
    for (auto *BB : CurLoop->getBlocks())
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses)
          if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
            auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
                                                 const_cast<MemoryUse *>(MU));
            if (!MSSA->isLiveOnEntryDef(MD) &&
                CurLoop->contains(MD->getBlock()))
              return false;
            // Disable hoisting past potentially interfering loads. Optimized
            // Uses may point to an access outside the loop, as getClobbering
            // checks the previous iteration when walking the backedge.
            // FIXME: More precise: no Uses that alias SI.
            if (!Flags.getIsSink() && !MSSA->dominates(SIMD, MU))
              return false;
          } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
            if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
              (void)LI; // Silence warning.
              assert(!LI->isUnordered() && "Expected ordered load");
              return false;
            }
            // Any call, while it may not be clobbering SI, may be a use.
            if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
              // Check if the call may read from the memory location written
              // to by SI. Check CI's attributes and arguments; the number of
              // such checks performed is limited above by NoOfMemAccTooLarge.
              ModRefInfo MRI = BAA.getModRefInfo(CI, MemoryLocation::get(SI));
              if (isModOrRefSet(MRI))
                return false;
            }
          }
      }
    return true;
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing, it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}
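
// For example (illustrative), the LCSSA phi
//   %v.lcssa = phi i32 [ %v, %exiting1 ], [ %v, %exiting2 ]
// has the single instruction %v as every incoming value, so the phi can be
// replaced outright by a copy of %v sunk into the exit block.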

/// Return true if the instruction is foldable in the loop.
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                             const TargetTransformInfo *TTI) {
  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    InstructionCost CostI =
        TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
    if (CostI != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getInstructionCost because currently
    // it optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  }

  return false;
}
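
// For instance (illustrative IR), a free GEP whose only in-loop user is a
// load in the same block,
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
//   %v = load i32, ptr %p
// typically folds into the load's addressing mode, so sinking the GEP by
// itself would gain nothing; it is reported as foldable instead.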

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;

      if (LoopNestMode) {
        while (isa<PHINode>(UI) && UI->hasOneUser() &&
               UI->getNumOperands() == 1) {
          if (!CurLoop->contains(UI))
            break;
          UI = cast<Instruction>(UI->user_back());
        }
      }
    }

    if (CurLoop->contains(UI)) {
      if (IsFoldable) {
        FoldableInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU.insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU.insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa");
      OpPN->insertBefore(ExitBlock.begin());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}
1486
1488 MemorySSAUpdater &MSSAU) {
1489 MSSAU.removeMemoryAccess(&I);
1490 SafetyInfo.removeInstruction(&I);
1491 I.eraseFromParent();
1492}
1493
1495 ICFLoopSafetyInfo &SafetyInfo,
1496 MemorySSAUpdater &MSSAU,
1497 ScalarEvolution *SE) {
1498 SafetyInfo.removeInstruction(&I);
1499 SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1500 I.moveBefore(&Dest);
1501 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1502 MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1503 MSSAU.moveToPlace(OldMemAcc, Dest.getParent(), MemorySSA::BeforeTerminator);
1504 if (SE)
1505 SE->forgetBlockAndLoopDispositions(&I);
1506}
1507
1508static Instruction *sinkThroughTriviallyReplaceablePHI(
1509 PHINode *TPN, Instruction *I, LoopInfo *LI,
1510 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1511 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1512 MemorySSAUpdater &MSSAU) {
1513 assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1514 "Expect only trivially replaceable PHI");
1515 BasicBlock *ExitBlock = TPN->getParent();
1516 Instruction *New;
1517 auto It = SunkCopies.find(ExitBlock);
1518 if (It != SunkCopies.end())
1519 New = It->second;
1520 else
1521 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1522 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1523 return New;
1524}
1525
1526static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1527 BasicBlock *BB = PN->getParent();
1528 if (!BB->canSplitPredecessors())
1529 return false;
1530 // It's not impossible to split EHPad blocks, but if BlockColors already
1531 // exists, it requires updating BlockColors for all offspring blocks
1532 // accordingly. By skipping this corner case, we keep the BlockColors update
1533 // after splitting a predecessor fairly simple.
1534 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1535 return false;
1536 for (BasicBlock *BBPred : predecessors(BB)) {
1537 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1538 return false;
1539 }
1540 return true;
1541}
1542
1543static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1544 LoopInfo *LI, const Loop *CurLoop,
1545 LoopSafetyInfo *SafetyInfo,
1546 MemorySSAUpdater *MSSAU) {
1547#ifndef NDEBUG
1548 SmallVector<BasicBlock *, 32> ExitBlocks;
1549 CurLoop->getUniqueExitBlocks(ExitBlocks);
1550 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1551 ExitBlocks.end());
1552#endif
1553 BasicBlock *ExitBB = PN->getParent();
1554 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1555
1556 // Split predecessors of the loop exit so that instructions in the loop are
1557 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1558 // loop in the canonical form where each predecessor of each exit block is
1559 // contained within the loop. For example, this will convert the loop below
1560 // from
1561 //
1562 // LB1:
1563 // %v1 =
1564 // br %LE, %LB2
1565 // LB2:
1566 // %v2 =
1567 // br %LE, %LB1
1568 // LE:
1569 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1570 //
1571 // to
1572 //
1573 // LB1:
1574 // %v1 =
1575 // br %LE.split, %LB2
1576 // LB2:
1577 // %v2 =
1578 // br %LE.split2, %LB1
1579 // LE.split:
1580 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1581 // br %LE
1582 // LE.split2:
1583 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1584 // br %LE
1585 // LE:
1586 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1587 //
1588 const auto &BlockColors = SafetyInfo->getBlockColors();
1589 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1590 while (!PredBBs.empty()) {
1591 BasicBlock *PredBB = *PredBBs.begin();
1592 assert(CurLoop->contains(PredBB) &&
1593 "Expect all predecessors are in the loop");
1594 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1595 BasicBlock *NewPred = SplitBlockPredecessors(
1596 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1597 // Since we do not allow splitting EH-block with BlockColors in
1598 // canSplitPredecessors(), we can simply assign predecessor's color to
1599 // the new block.
1600 if (!BlockColors.empty())
1601 // Grab a reference to the ColorVector to be inserted before getting the
1602 // reference to the vector we are copying because inserting the new
1603 // element in BlockColors might cause the map to be reallocated.
1604 SafetyInfo->copyColors(NewPred, PredBB);
1605 }
1606 PredBBs.remove(PredBB);
1607 }
1608}
1609
1610/// When an instruction is found to only be used outside of the loop, this
1611/// function moves it to the exit blocks and patches up SSA form as needed.
1612/// This method is guaranteed to remove the original instruction from its
1613/// position, and may either delete it or move it out of the loop.
1614///
1615static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1616 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1617 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {
1618 bool Changed = false;
1619 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1620
1621 // Iterate over users to be ready for actual sinking. Replace users via
1622 // unreachable blocks with poison and make all user PHIs trivially replaceable.
1623 SmallPtrSet<Instruction *, 8> VisitedUsers;
1624 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1625 auto *User = cast<Instruction>(*UI);
1626 Use &U = UI.getUse();
1627 ++UI;
1628
1629 if (VisitedUsers.count(User) || CurLoop->contains(User))
1630 continue;
1631
1632 if (!DT->isReachableFromEntry(User->getParent())) {
1633 U = PoisonValue::get(I.getType());
1634 Changed = true;
1635 continue;
1636 }
1637
1638 // The user must be a PHI node.
1639 PHINode *PN = cast<PHINode>(User);
1640
1641 // Surprisingly, instructions can be used outside of loops without any
1642 // exits. This can only happen in PHI nodes if the incoming block is
1643 // unreachable.
1644 BasicBlock *BB = PN->getIncomingBlock(U);
1645 if (!DT->isReachableFromEntry(BB)) {
1646 U = PoisonValue::get(I.getType());
1647 Changed = true;
1648 continue;
1649 }
1650
1651 VisitedUsers.insert(PN);
1652 if (isTriviallyReplaceablePHI(*PN, I))
1653 continue;
1654
1655 if (!canSplitPredecessors(PN, SafetyInfo))
1656 return Changed;
1657
1658 // Split predecessors of the PHI so that we can make users trivially
1659 // replaceable.
1660 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1661
1662 // Should rebuild the iterators, as they may be invalidated by
1663 // splitPredecessorsOfLoopExit().
1664 UI = I.user_begin();
1665 UE = I.user_end();
1666 }
1667
1668 if (VisitedUsers.empty())
1669 return Changed;
1670
1671 ORE->emit([&]() {
1672 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1673 << "sinking " << ore::NV("Inst", &I);
1674 });
1675 if (isa<LoadInst>(I))
1676 ++NumMovedLoads;
1677 else if (isa<CallInst>(I))
1678 ++NumMovedCalls;
1679 ++NumSunk;
1680
1681#ifndef NDEBUG
1682 SmallVector<BasicBlock *, 32> ExitBlocks;
1683 CurLoop->getUniqueExitBlocks(ExitBlocks);
1684 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1685 ExitBlocks.end());
1686#endif
1687
1688 // Clones of this instruction. Don't create more than one per exit block!
1689 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1690
1691 // If this instruction is only used outside of the loop, then all users are
1692 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1693 // the instruction.
1694 // First check if I is worth sinking for all uses. Sink only when it is
1695 // worthwhile across all uses.
1696 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1697 for (auto *UI : Users) {
1698 auto *User = cast<Instruction>(UI);
1699
1700 if (CurLoop->contains(User))
1701 continue;
1702
1703 PHINode *PN = cast<PHINode>(User);
1704 assert(ExitBlockSet.count(PN->getParent()) &&
1705 "The LCSSA PHI is not in an exit block!");
1706
1707 // The PHI must be trivially replaceable.
1708 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1709 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1710 // As we sink the instruction out of the BB, drop its debug location.
1711 New->dropLocation();
1712 PN->replaceAllUsesWith(New);
1713 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1714 Changed = true;
1715 }
1716 return Changed;
1717}
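// For illustration, a minimal sketch in C-like pseudocode (the names and loop
// shape are hypothetical, not from this file): sinking moves a computation
// whose only users are outside the loop into the exit block, so it executes
// once rather than on every iteration.
//
//   for (...) { v = a + b; }      // v is only used after the loop
//   use(v);
// becomes
//   for (...) { }
//   v.le = a + b;                 // clone placed in the exit block
//   use(v.le);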
1718
1719/// When an instruction is found to use only loop-invariant operands and it
1720/// is safe to hoist, this function is called to do the dirty work.
1721///
1722static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1723 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1724 MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
1725 OptimizationRemarkEmitter *ORE) {
1726 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1727 << I << "\n");
1728 ORE->emit([&]() {
1729 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1730 << ore::NV("Inst", &I);
1731 });
1732
1733 // Metadata can be dependent on conditions we are hoisting above.
1734 // Conservatively strip all metadata on the instruction unless we were
1735 // guaranteed to execute I if we entered the loop, in which case the metadata
1736 // is valid in the loop preheader.
1737 // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1738 // then moving to the preheader means we should strip attributes on the call
1739 // that can cause UB since we may be hoisting above conditions that allowed
1740 // inferring those attributes. They may not be valid at the preheader.
1741 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1742 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1743 // time in isGuaranteedToExecute if we don't actually have anything to
1744 // drop. It is a compile time optimization, not required for correctness.
1745 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1746 I.dropUBImplyingAttrsAndMetadata();
1747
1748 if (isa<PHINode>(I))
1749 // Move the new node to the end of the phi list in the destination block.
1750 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1751 else
1752 // Move the new node to the destination block, before its terminator.
1753 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1754
1755 I.updateLocationAfterHoist();
1756
1757 if (isa<LoadInst>(I))
1758 ++NumMovedLoads;
1759 else if (isa<CallInst>(I))
1760 ++NumMovedCalls;
1761 ++NumHoisted;
1762}
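// For illustration, a minimal sketch in C-like pseudocode (hypothetical
// names): hoisting moves a computation whose operands are all loop-invariant
// into the preheader.
//
//   for (...) { v = a + b; use(v); }
// becomes
//   v = a + b;                    // computed once in the preheader
//   for (...) { use(v); }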
1763
1764/// Only sink or hoist an instruction if it is not a trapping instruction,
1765/// if the instruction is known not to trap when moved to the preheader,
1766/// or if it is a trapping instruction that is guaranteed to execute.
1767static bool isSafeToExecuteUnconditionally(
1768 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1769 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1770 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1771 AssumptionCache *AC, bool AllowSpeculation) {
1772 if (AllowSpeculation &&
1773 isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1774 return true;
1775
1776 bool GuaranteedToExecute =
1777 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1778
1779 if (!GuaranteedToExecute) {
1780 auto *LI = dyn_cast<LoadInst>(&Inst);
1781 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1782 ORE->emit([&]() {
1783 return OptimizationRemarkMissed(
1784 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1785 << "failed to hoist load with loop-invariant address "
1786 "because load is conditionally executed";
1787 });
1788 }
1789
1790 return GuaranteedToExecute;
1791}
1792
1793namespace {
1794class LoopPromoter : public LoadAndStorePromoter {
1795 Value *SomePtr; // Designated pointer to store to.
1796 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1797 SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1798 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1799 PredIteratorCache &PredCache;
1800 MemorySSAUpdater &MSSAU;
1801 LoopInfo &LI;
1802 DebugLoc DL;
1803 Align Alignment;
1804 bool UnorderedAtomic;
1805 AAMDNodes AATags;
1806 ICFLoopSafetyInfo &SafetyInfo;
1807 bool CanInsertStoresInExitBlocks;
1808 ArrayRef<const Instruction *> Uses;
1809
1810 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1811 // (if legal) if doing so would add an out-of-loop use to an instruction
1812 // defined in-loop.
1813 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1814 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1815 return V;
1816
1817 Instruction *I = cast<Instruction>(V);
1818 // We need to create an LCSSA PHI node for the incoming value and
1819 // store that.
1820 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1821 I->getName() + ".lcssa");
1822 PN->insertBefore(BB->begin());
1823 for (BasicBlock *Pred : PredCache.get(BB))
1824 PN->addIncoming(I, Pred);
1825 return PN;
1826 }
1827
1828public:
1829 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1830 SmallVectorImpl<BasicBlock *> &LEB,
1831 SmallVectorImpl<BasicBlock::iterator> &LIP,
1832 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1833 MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1834 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1835 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1836 : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1837 LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1838 LI(li), DL(std::move(dl)), Alignment(Alignment),
1839 UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1840 SafetyInfo(SafetyInfo),
1841 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1842
1843 void insertStoresInLoopExitBlocks() {
1844 // Insert stores in the loop exit blocks. Each exit block gets a store
1845 // of the live-out value that feeds it. Since we've already told
1846 // the SSA updater about the defs in the loop and the preheader
1847 // definition, it is all set and we can start using it.
1848 DIAssignID *NewID = nullptr;
1849 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1850 BasicBlock *ExitBlock = LoopExitBlocks[i];
1851 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1852 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1853 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1854 BasicBlock::iterator InsertPos = LoopInsertPts[i];
1855 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1856 if (UnorderedAtomic)
1857 NewSI->setOrdering(AtomicOrdering::Unordered);
1858 NewSI->setAlignment(Alignment);
1859 NewSI->setDebugLoc(DL);
1860 // Attach DIAssignID metadata to the new store, generating it on the
1861 // first loop iteration.
1862 if (i == 0) {
1863 // NewSI will have its DIAssignID set here if there are any stores in
1864 // Uses with a DIAssignID attachment. This merged ID will then be
1865 // attached to the other inserted stores (in the branch below).
1866 NewSI->mergeDIAssignID(Uses);
1867 NewID = cast_or_null<DIAssignID>(
1868 NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1869 } else {
1870 // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1871 // above.
1872 NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1873 }
1874
1875 if (AATags)
1876 NewSI->setAAMetadata(AATags);
1877
1878 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1879 MemoryAccess *NewMemAcc;
1880 if (!MSSAInsertPoint) {
1881 NewMemAcc = MSSAU.createMemoryAccessInBB(
1882 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1883 } else {
1884 NewMemAcc =
1885 MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1886 }
1887 MSSAInsertPts[i] = NewMemAcc;
1888 MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1889 // FIXME: true for safety, false may still be correct.
1890 }
1891 }
1892
1893 void doExtraRewritesBeforeFinalDeletion() override {
1894 if (CanInsertStoresInExitBlocks)
1895 insertStoresInLoopExitBlocks();
1896 }
1897
1898 void instructionDeleted(Instruction *I) const override {
1899 SafetyInfo.removeInstruction(I);
1900 MSSAU.removeMemoryAccess(I);
1901 }
1902
1903 bool shouldDelete(Instruction *I) const override {
1904 if (isa<StoreInst>(I))
1905 return CanInsertStoresInExitBlocks;
1906 return true;
1907 }
1908};
1909
1910bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1911 DominatorTree *DT) {
1912 // We can perform the captured-before check against any instruction in the
1913 // loop header, as the loop header is reachable from any instruction inside
1914 // the loop.
1915 // TODO: ReturnCaptures=true shouldn't be necessary here.
1916 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1917 /* StoreCaptures */ true,
1918 L->getHeader()->getTerminator(), DT);
1919}
1920
1921/// Return true if we can prove that a caller cannot inspect the object if an
1922/// unwind occurs inside the loop.
1923bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1924 DominatorTree *DT) {
1925 bool RequiresNoCaptureBeforeUnwind;
1926 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1927 return false;
1928
1929 return !RequiresNoCaptureBeforeUnwind ||
1930 isNotCapturedBeforeOrInLoop(Object, L, DT);
1931}
1932
1933bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1934 TargetTransformInfo *TTI) {
1935 // The object must be function-local to start with, and then not captured
1936 // before/in the loop.
1937 return (isIdentifiedFunctionLocal(Object) &&
1938 isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
1939 (TTI->isSingleThreaded() || SingleThread);
1940}
1941
1942} // namespace
1943
1944/// Try to promote memory values to scalars by sinking stores out of the
1945/// loop and moving loads to before the loop. We do this by looping over
1946/// the stores in the loop, looking for stores to Must pointers which are
1947/// loop invariant.
1948///
1949bool llvm::promoteLoopAccessesToScalars(
1950 const SmallSetVector<Value *, 8> &PointerMustAliases,
1951 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1952 SmallVectorImpl<BasicBlock::iterator> &InsertPts,
1953 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1954 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1955 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1956 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1957 OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1958 bool HasReadsOutsideSet) {
1959 // Verify inputs.
1960 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1961 SafetyInfo != nullptr &&
1962 "Unexpected Input to promoteLoopAccessesToScalars");
1963
1964 LLVM_DEBUG({
1965 dbgs() << "Trying to promote set of must-aliased pointers:\n";
1966 for (Value *Ptr : PointerMustAliases)
1967 dbgs() << " " << *Ptr << "\n";
1968 });
1969 ++NumPromotionCandidates;
1970
1971 Value *SomePtr = *PointerMustAliases.begin();
1972 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1973
1974 // It is not safe to promote a load/store from the loop if the load/store is
1975 // conditional. For example, turning:
1976 //
1977 // for () { if (c) *P += 1; }
1978 //
1979 // into:
1980 //
1981 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1982 //
1983 // is not safe, because *P may only be valid to access if 'c' is true.
1984 //
1985 // The safety property divides into two parts:
1986 // p1) The memory may not be dereferenceable on entry to the loop. In this
1987 // case, we can't insert the required load in the preheader.
1988 // p2) The memory model does not allow us to insert a store along any dynamic
1989 // path which did not originally have one.
1990 //
1991 // If at least one store is guaranteed to execute, both properties are
1992 // satisfied, and promotion is legal.
1993 //
1994 // This, however, is not a necessary condition. Even if no store/load is
1995 // guaranteed to execute, we can still establish these properties.
1996 // We can establish (p1) by proving that hoisting the load into the preheader
1997 // is safe (i.e. proving dereferenceability on all paths through the loop). We
1998 // can use any access within the alias set to prove dereferenceability,
1999 // since they're all must alias.
2000 //
2001 // There are two ways to establish (p2):
2002 // a) Prove the location is thread-local. In this case the memory model
2003 // requirement does not apply, and stores are safe to insert.
2004 // b) Prove a store dominates every exit block. In this case, if an exit
2005 // block is reached, the original dynamic path would have taken us through
2006 // the store, so inserting a store into the exit block is safe. Note that this
2007 // is different from the store being guaranteed to execute. For instance,
2008 // if an exception is thrown on the first iteration of the loop, the original
2009 // store is never executed, but the exit blocks are not executed either.
2010
2011 bool DereferenceableInPH = false;
2012 bool StoreIsGuanteedToExecute = false;
2013 bool FoundLoadToPromote = false;
2014 // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
2015 enum {
2016 StoreSafe,
2017 StoreUnsafe,
2018 StoreSafetyUnknown,
2019 } StoreSafety = StoreSafetyUnknown;
2020
2021 SmallVector<Instruction *, 64> LoopUses;
2022
2023 // We start with an alignment of one and try to find instructions that allow
2024 // us to prove better alignment.
2025 Align Alignment;
2026 // Keep track of which types of access we see
2027 bool SawUnorderedAtomic = false;
2028 bool SawNotAtomic = false;
2029 AAMDNodes AATags;
2030
2031 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2032
2033 // If there are reads outside the promoted set, then promoting stores is
2034 // definitely not safe.
2035 if (HasReadsOutsideSet)
2036 StoreSafety = StoreUnsafe;
2037
2038 if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2039 // If a loop can throw, we have to insert a store along each unwind edge.
2040 // That said, we can't actually make the unwind edge explicit. Therefore,
2041 // we have to prove that the store is dead along the unwind edge. We do
2042 // this by proving that the caller can't have a reference to the object
2043 // after return and thus can't possibly load from the object.
2044 Value *Object = getUnderlyingObject(SomePtr);
2045 if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2046 StoreSafety = StoreUnsafe;
2047 }
2048
2049 // Check that all accesses to pointers in the alias set use the same type.
2050 // We cannot (yet) promote a memory location that is loaded and stored in
2051 // different sizes. While we are at it, collect alignment and AA info.
2052 Type *AccessTy = nullptr;
2053 for (Value *ASIV : PointerMustAliases) {
2054 for (Use &U : ASIV->uses()) {
2055 // Ignore instructions that are outside the loop.
2056 Instruction *UI = dyn_cast<Instruction>(U.getUser());
2057 if (!UI || !CurLoop->contains(UI))
2058 continue;
2059
2060 // If there is a non-load/store instruction in the loop, we can't promote
2061 // it.
2062 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2063 if (!Load->isUnordered())
2064 return false;
2065
2066 SawUnorderedAtomic |= Load->isAtomic();
2067 SawNotAtomic |= !Load->isAtomic();
2068 FoundLoadToPromote = true;
2069
2070 Align InstAlignment = Load->getAlign();
2071
2072 // Note that proving a load safe to speculate requires proving
2073 // sufficient alignment at the target location. Proving it guaranteed
2074 // to execute does as well. Thus we can increase our guaranteed
2075 // alignment as well.
2076 if (!DereferenceableInPH || (InstAlignment > Alignment))
2077 if (isSafeToExecuteUnconditionally(
2078 *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2079 Preheader->getTerminator(), AC, AllowSpeculation)) {
2080 DereferenceableInPH = true;
2081 Alignment = std::max(Alignment, InstAlignment);
2082 }
2083 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2084 // Stores *of* the pointer are not interesting, only stores *to* the
2085 // pointer.
2086 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2087 continue;
2088 if (!Store->isUnordered())
2089 return false;
2090
2091 SawUnorderedAtomic |= Store->isAtomic();
2092 SawNotAtomic |= !Store->isAtomic();
2093
2094 // If the store is guaranteed to execute, both properties are satisfied.
2095 // We may want to check if a store is guaranteed to execute even if we
2096 // already know that promotion is safe, since it may have higher
2097 // alignment than any other guaranteed stores, in which case we can
2098 // raise the alignment on the promoted store.
2099 Align InstAlignment = Store->getAlign();
2100 bool GuaranteedToExecute =
2101 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2102 StoreIsGuanteedToExecute |= GuaranteedToExecute;
2103 if (GuaranteedToExecute) {
2104 DereferenceableInPH = true;
2105 if (StoreSafety == StoreSafetyUnknown)
2106 StoreSafety = StoreSafe;
2107 Alignment = std::max(Alignment, InstAlignment);
2108 }
2109
2110 // If a store dominates all exit blocks, it is safe to sink.
2111 // As explained above, if an exit block was executed, a dominating
2112 // store must have been executed at least once, so we are not
2113 // introducing stores on paths that did not have them.
2114 // Note that this only looks at explicit exit blocks. If we ever
2115 // start sinking stores into unwind edges (see above), this will break.
2116 if (StoreSafety == StoreSafetyUnknown &&
2117 llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2118 return DT->dominates(Store->getParent(), Exit);
2119 }))
2120 StoreSafety = StoreSafe;
2121
2122 // If the store is not guaranteed to execute, we may still get
2123 // deref info through it.
2124 if (!DereferenceableInPH) {
2125 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2126 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2127 Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2128 }
2129 } else
2130 continue; // Not a load or store.
2131
2132 if (!AccessTy)
2133 AccessTy = getLoadStoreType(UI);
2134 else if (AccessTy != getLoadStoreType(UI))
2135 return false;
2136
2137 // Merge the AA tags.
2138 if (LoopUses.empty()) {
2139 // On the first load/store, just take its AA tags.
2140 AATags = UI->getAAMetadata();
2141 } else if (AATags) {
2142 AATags = AATags.merge(UI->getAAMetadata());
2143 }
2144
2145 LoopUses.push_back(UI);
2146 }
2147 }
2148
2149 // If we found both an unordered atomic instruction and a non-atomic memory
2150 // access, bail. We can't blindly promote non-atomic to atomic since we
2151 // might not be able to lower the result. We can't downgrade since that
2152 // would violate the memory model. Also, align 0 is an error for atomics.
2153 if (SawUnorderedAtomic && SawNotAtomic)
2154 return false;
2155
2156 // If we're inserting an atomic load in the preheader, we must be able to
2157 // lower it. We're only guaranteed to be able to lower naturally aligned
2158 // atomics.
2159 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2160 return false;
2161
2162 // If we couldn't prove we can hoist the load, bail.
2163 if (!DereferenceableInPH) {
2164 LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2165 return false;
2166 }
2167
2168 // We know we can hoist the load, but don't have a guaranteed store.
2169 // Check whether the location is writable and thread-local. If it is, then we
2170 // can insert stores along paths which originally didn't have them without
2171 // violating the memory model.
2172 if (StoreSafety == StoreSafetyUnknown) {
2173 Value *Object = getUnderlyingObject(SomePtr);
2174 bool ExplicitlyDereferenceableOnly;
2175 if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2176 (!ExplicitlyDereferenceableOnly ||
2177 isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2178 isThreadLocalObject(Object, CurLoop, DT, TTI))
2179 StoreSafety = StoreSafe;
2180 }
2181
2182 // If we've still failed to prove we can sink the store, hoist the load
2183 // only, if possible.
2184 if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2185 // If we cannot hoist the load either, give up.
2186 return false;
2187
2188 // Let's do the promotion!
2189 if (StoreSafety == StoreSafe) {
2190 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2191 << '\n');
2192 ++NumLoadStorePromoted;
2193 } else {
2194 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2195 << '\n');
2196 ++NumLoadPromoted;
2197 }
2198
2199 ORE->emit([&]() {
2200 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2201 LoopUses[0])
2202 << "Moving accesses to memory location out of the loop";
2203 });
2204
2205 // Look at all the loop uses, and try to merge their locations.
2206 std::vector<DILocation *> LoopUsesLocs;
2207 for (auto *U : LoopUses)
2208 LoopUsesLocs.push_back(U->getDebugLoc().get());
2209 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2210
2211 // We use the SSAUpdater interface to insert phi nodes as required.
2212 SmallVector<PHINode *, 16> NewPHIs;
2213 SSAUpdater SSA(&NewPHIs);
2214 LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2215 MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2216 SawUnorderedAtomic, AATags, *SafetyInfo,
2217 StoreSafety == StoreSafe);
2218
2219 // Set up the preheader to have a definition of the value. It is the live-out
2220 // value from the preheader that uses in the loop will consume.
2221 LoadInst *PreheaderLoad = nullptr;
2222 if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
2223 PreheaderLoad =
2224 new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2225 Preheader->getTerminator());
2226 if (SawUnorderedAtomic)
2227 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2228 PreheaderLoad->setAlignment(Alignment);
2229 PreheaderLoad->setDebugLoc(DebugLoc());
2230 if (AATags)
2231 PreheaderLoad->setAAMetadata(AATags);
2232
2233 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2234 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2235 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2236 MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2237 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2238 } else {
2239 SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2240 }
2241
2242 if (VerifyMemorySSA)
2243 MSSAU.getMemorySSA()->verifyMemorySSA();
2244 // Rewrite all the loads in the loop and remember all the definitions from
2245 // stores in the loop.
2246 Promoter.run(LoopUses);
2247
2248 if (VerifyMemorySSA)
2249 MSSAU.getMemorySSA()->verifyMemorySSA();
2250 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2251 if (PreheaderLoad && PreheaderLoad->use_empty())
2252 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2253
2254 return true;
2255}
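// For illustration, a minimal sketch in C-like pseudocode (hypothetical
// names), assuming the checks above proved *P dereferenceable in the
// preheader and the store safe to sink:
//
//   for (...) { *P = *P + 1; }
// becomes
//   p = *P;                       // load hoisted into the preheader
//   for (...) { p = p + 1; }
//   *P = p;                       // store sunk into the exit block(s)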
2256
2257static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2258 function_ref<void(Instruction *)> Fn) {
2259 for (const BasicBlock *BB : L->blocks())
2260 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2261 for (const auto &Access : *Accesses)
2262 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2263 Fn(MUD->getMemoryInst());
2264}
2265
2266// The bool indicates whether there might be reads outside the set, in which
2267// case only loads may be promoted.
2268static SmallVector<PointersAndHasReadsOutsideSet, 0>
2269collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2270 BatchAAResults BatchAA(*AA);
2271 AliasSetTracker AST(BatchAA);
2272
2273 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2274 if (const auto *SI = dyn_cast<StoreInst>(I))
2275 return L->isLoopInvariant(SI->getPointerOperand());
2276 if (const auto *LI = dyn_cast<LoadInst>(I))
2277 return L->isLoopInvariant(LI->getPointerOperand());
2278 return false;
2279 };
2280
2281 // Populate AST with potentially promotable accesses.
2282 SmallPtrSet<Value *, 16> AttemptingPromotion;
2283 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2284 if (IsPotentiallyPromotable(I)) {
2285 AttemptingPromotion.insert(I);
2286 AST.add(I);
2287 }
2288 });
2289
2290 // We're only interested in must-alias sets that contain a mod.
2291 SmallVector<PointerIntPair<const AliasSet *, 1, bool>, 8> Sets;
2292 for (AliasSet &AS : AST)
2293 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2294 Sets.push_back({&AS, false});
2295
2296 if (Sets.empty())
2297 return {}; // Nothing to promote...
2298
2299 // Discard any sets for which there is an aliasing non-promotable access.
2300 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2301 if (AttemptingPromotion.contains(I))
2302 return;
2303
2304 llvm::erase_if(Sets, [&](PointerIntPair<const AliasSet *, 1, bool> &Pair) {
2305 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2306 // Cannot promote if there are writes outside the set.
2307 if (isModSet(MR))
2308 return true;
2309 if (isRefSet(MR)) {
2310 // Remember reads outside the set.
2311 Pair.setInt(true);
2312 // If this is a mod-only set and there are reads outside the set,
2313 // we will not be able to promote, so bail out early.
2314 return !Pair.getPointer()->isRef();
2315 }
2316 return false;
2317 });
2318 });
2319
2320 SmallVector<PointersAndHasReadsOutsideSet, 0> Result;
2321 for (auto [Set, HasReadsOutsideSet] : Sets) {
2322 SmallSetVector<Value *, 8> PointerMustAliases;
2323 for (const auto &ASI : *Set)
2324 PointerMustAliases.insert(ASI.getValue());
2325 Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2326 }
2327
2328 return Result;
2329}
2330
2331static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
2332 Loop *CurLoop, Instruction &I,
2333 SinkAndHoistLICMFlags &Flags,
2334 bool InvariantGroup) {
2335 // For hoisting, use the walker to determine safety
2336 if (!Flags.getIsSink()) {
2337 // If hoisting an invariant group, we only need to check that there
2338 // is no store to the loaded pointer between the start of the loop,
2339 // and the load (since all values must be the same).
2340
2341 // This can be checked via either of two conditions:
2342 // 1) the clobbering memory access is outside the loop, or
2343 // 2) the earliest clobbering access is the MemoryPhi at the
2344 //    loop header.
2345
2346 BatchAAResults BAA(MSSA->getAA());
2347 MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
2348 return !MSSA->isLiveOnEntryDef(Source) &&
2349 CurLoop->contains(Source->getBlock()) &&
2350 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2351 }
2352
2353 // For sinking, we'd need to check all Defs below this use. The getClobbering
2354 // call will look on the backedge of the loop, but will check aliasing with
2355 // the instructions on the previous iteration.
2356 // For example:
2357 // for (i ... )
2358 // load a[i] ( Use (LoE)
2359 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2360 // i++;
2361 // The load sees no clobbering inside the loop, as the backedge alias check
2362 // does phi translation, and will check aliasing against store a[i-1].
2363 // However sinking the load outside the loop, below the store is incorrect.
2364
2365 // For now, only sink if there are no Defs in the loop, and the existing ones
2366 // precede the use and are in the same block.
2367 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2368 // needs PostDominatorTreeAnalysis.
2369 // FIXME: More precise: no Defs that alias this Use.
2370 if (Flags.tooManyMemoryAccesses())
2371 return true;
2372 for (auto *BB : CurLoop->getBlocks())
2373 if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2374 return true;
2375 // When sinking, the source block may not be part of the loop so check it.
2376 if (!CurLoop->contains(&I))
2377 return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2378
2379 return false;
2380}
2381
2382bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU) {
2383 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2384 for (const auto &MA : *Accesses)
2385 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2386 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2387 return true;
2388 return false;
2389}
2390
2391/// Try to simplify things like (A < INV_1 AND A < INV_2) into (A <
2392/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
2393/// minimum can be computed outside of the loop, and A is not loop-invariant.
2394static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2395 MemorySSAUpdater &MSSAU) {
2396 bool Inverse = false;
2397 using namespace PatternMatch;
2398 Value *Cond1, *Cond2;
2399 if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2400 Inverse = true;
2401 } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2402 // Do nothing
2403 } else
2404 return false;
2405
2406 auto MatchICmpAgainstInvariant = [&](Value *C, ICmpInst::Predicate &P,
2407 Value *&LHS, Value *&RHS) {
2408 if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2409 return false;
2410 if (!LHS->getType()->isIntegerTy())
2411 return false;
2412 if (!ICmpInst::isRelational(P))
2413 return false;
2414 if (L.isLoopInvariant(LHS)) {
2415 std::swap(LHS, RHS);
2416 P = ICmpInst::getSwappedPredicate(P);
2417 }
2418 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2419 return false;
2420 if (Inverse)
2421 P = ICmpInst::getInversePredicate(P);
2422 return true;
2423 };
2424 ICmpInst::Predicate P1, P2;
2425 Value *LHS1, *LHS2, *RHS1, *RHS2;
2426 if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2427 !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2428 return false;
2429 if (P1 != P2 || LHS1 != LHS2)
2430 return false;
2431
2432 // Everything is fine, we can do the transform.
2433 bool UseMin = ICmpInst::isLT(P1) || ICmpInst::isLE(P1);
2434 assert(
2435 (UseMin || ICmpInst::isGT(P1) || ICmpInst::isGE(P1)) &&
2436 "Relational predicate is either less (or equal) or greater (or equal)!");
2438 ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2439 : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2440 auto *Preheader = L.getLoopPreheader();
2441 assert(Preheader && "Loop is not in simplify form?");
2442 IRBuilder<> Builder(Preheader->getTerminator());
2443 // We are about to create a new guaranteed use for RHS2 which might not exist
2444 // before (if it was a non-taken input of logical and/or instruction). If it
2445 // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
2446 // introduced, so they don't need this.
2447 if (isa<SelectInst>(I))
2448 RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
2449 Value *NewRHS = Builder.CreateBinaryIntrinsic(
2450 id, RHS1, RHS2, nullptr, StringRef("invariant.") +
2451 (ICmpInst::isSigned(P1) ? "s" : "u") +
2452 (UseMin ? "min" : "max"));
2453 Builder.SetInsertPoint(&I);
2454 ICmpInst::Predicate P = P1;
2455 if (Inverse)
2456 P = ICmpInst::getInversePredicate(P);
2457 Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2458 NewCond->takeName(&I);
2459 I.replaceAllUsesWith(NewCond);
2460 eraseInstruction(I, SafetyInfo, MSSAU);
2461 eraseInstruction(*cast<Instruction>(Cond1), SafetyInfo, MSSAU);
2462 eraseInstruction(*cast<Instruction>(Cond2), SafetyInfo, MSSAU);
2463 return true;
2464}
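// For illustration, a sketch of the signed less-than case on hypothetical IR
// (%x loop-variant, %inv1/%inv2 loop-invariant):
//
//   preheader:
//     %m = call i32 @llvm.smin.i32(i32 %inv1, i32 %inv2)
//   loop:
//     %c = icmp slt i32 %x, %m    ; replaces (%x < %inv1) && (%x < %inv2)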
2465
2466/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2467/// this allows hoisting the inner GEP.
2468static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2469 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2470 DominatorTree *DT) {
2471 auto *GEP = dyn_cast<GetElementPtrInst>(&I);
2472 if (!GEP)
2473 return false;
2474
2475 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2476 if (!Src || !Src->hasOneUse() || !L.contains(Src))
2477 return false;
2478
2479 Value *SrcPtr = Src->getPointerOperand();
2480 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2481 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2482 return false;
2483
2484 // This can only happen if !AllowSpeculation, otherwise this would already be
2485 // handled.
2486 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2487 // The flag exists to prevent metadata dropping, which is not relevant here.
2488 if (all_of(Src->indices(), LoopInvariant))
2489 return false;
2490
2491 // The swapped GEPs are inbounds if both original GEPs are inbounds
2492 // and the sign of the offsets is the same. For simplicity, only
2493 // handle both offsets being non-negative.
2494 const DataLayout &DL = GEP->getModule()->getDataLayout();
2495 auto NonNegative = [&](Value *V) {
2496 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2497 };
2498 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2499 all_of(Src->indices(), NonNegative) &&
2500 all_of(GEP->indices(), NonNegative);
2501
2502 BasicBlock *Preheader = L.getLoopPreheader();
2503 IRBuilder<> Builder(Preheader->getTerminator());
2504 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2505 SmallVector<Value *>(GEP->indices()),
2506 "invariant.gep", IsInBounds);
2507 Builder.SetInsertPoint(GEP);
2508 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2509 SmallVector<Value *>(Src->indices()), "gep",
2510 IsInBounds);
2511 GEP->replaceAllUsesWith(NewGEP);
2512 eraseInstruction(*GEP, SafetyInfo, MSSAU);
2513 eraseInstruction(*Src, SafetyInfo, MSSAU);
2514 return true;
2515}
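// For illustration, a sketch on hypothetical IR (%p and %j loop-invariant,
// %i loop-variant):
//
//   loop:      %a = getelementptr %p, %i     ; not hoistable
//              %b = getelementptr %a, %j
// becomes
//   preheader: %inv = getelementptr %p, %j   ; hoisted
//   loop:      %b   = getelementptr %inv, %i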
2516
2517/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2518/// C1 and C2 are loop invariants and LV is a loop-variant.
2519static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2520 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2521 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2522 AssumptionCache *AC, DominatorTree *DT) {
2523 assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
2524 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2525 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2526
2527 // Try to represent VariantLHS as sum of invariant and variant operands.
2528 using namespace PatternMatch;
2529 Value *VariantOp, *InvariantOp;
2530 if (!match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2531 return false;
2532
2533 // LHS itself is a loop-variant, try to represent it in the form:
2534 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2535 if (L.isLoopInvariant(VariantOp))
2536 std::swap(VariantOp, InvariantOp);
2537 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2538 return false;
2539
2540 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2541 // freely move values from left side of inequality to right side (just as in
2542 // normal linear arithmetic). Overflows make things much more complicated, so
2543 // we want to avoid this.
2544 auto &DL = L.getHeader()->getModule()->getDataLayout();
2545 bool ProvedNoOverflowAfterReassociate =
2546 computeOverflowForSignedSub(InvariantRHS, InvariantOp,
2547 SimplifyQuery(DL, DT, AC, &ICmp)) ==
2548 llvm::OverflowResult::NeverOverflows;
2549 if (!ProvedNoOverflowAfterReassociate)
2550 return false;
2551 auto *Preheader = L.getLoopPreheader();
2552 assert(Preheader && "Loop is not in simplify form?");
2553 IRBuilder<> Builder(Preheader->getTerminator());
2554 Value *NewCmpOp = Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
2555 /*HasNUW*/ false, /*HasNSW*/ true);
2556 ICmp.setPredicate(Pred);
2557 ICmp.setOperand(0, VariantOp);
2558 ICmp.setOperand(1, NewCmpOp);
2559 eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
2560 return true;
2561}
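// For illustration, a sketch on hypothetical IR (%iv loop-variant, %c1/%c2
// loop-invariant), given the no-overflow check above:
//
//   loop:      %lhs = add nsw i32 %iv, %c1
//              %cmp = icmp slt i32 %lhs, %c2
// becomes
//   preheader: %inv = sub nsw i32 %c2, %c1
//   loop:      %cmp = icmp slt i32 %iv, %inv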
2562
2563/// Try to reassociate and hoist the following two patterns:
2564/// LV - C1 < C2 --> LV < C1 + C2,
2565/// C1 - LV < C2 --> LV > C1 - C2.
2566static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2567 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2568 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2569 AssumptionCache *AC, DominatorTree *DT) {
2570 assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
2571 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2572 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2573
2574 // Try to represent VariantLHS as sum of invariant and variant operands.
2575 using namespace PatternMatch;
2576 Value *VariantOp, *InvariantOp;
2577 if (!match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2578 return false;
2579
2580 bool VariantSubtracted = false;
2581 // LHS itself is a loop-variant, try to represent it in the form:
2582 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2583 // the variant operand goes with minus, we use a slightly different scheme.
2584 if (L.isLoopInvariant(VariantOp)) {
2585 std::swap(VariantOp, InvariantOp);
2586 VariantSubtracted = true;
2587 Pred = ICmpInst::getSwappedPredicate(Pred);
2588 }
2589 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2590 return false;
2591
2592 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2593 // freely move values from left side of inequality to right side (just as in
2594 // normal linear arithmetic). Overflows make things much more complicated, so
2595 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2596 // "C1 - C2" does not overflow.
2597 auto &DL = L.getHeader()->getModule()->getDataLayout();
2598 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2599 if (VariantSubtracted) {
2600 // C1 - LV < C2 --> LV > C1 - C2
2601 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
2602 llvm::OverflowResult::NeverOverflows)
2603 return false;
2604 } else {
2605 // LV - C1 < C2 --> LV < C1 + C2
2606 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
2607 llvm::OverflowResult::NeverOverflows)
2608 return false;
2609 }
2610 auto *Preheader = L.getLoopPreheader();
2611 assert(Preheader && "Loop is not in simplify form?");
2612 IRBuilder<> Builder(Preheader->getTerminator());
2613 Value *NewCmpOp =
2614 VariantSubtracted
2615 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
2616 /*HasNUW*/ false, /*HasNSW*/ true)
2617 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
2618 /*HasNUW*/ false, /*HasNSW*/ true);
2619 ICmp.setPredicate(Pred);
2620 ICmp.setOperand(0, VariantOp);
2621 ICmp.setOperand(1, NewCmpOp);
2622 eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
2623 return true;
2624}
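// For illustration, sketches of both patterns on hypothetical IR (%iv
// loop-variant, %c1/%c2 loop-invariant), given the no-overflow checks above:
//
//   %lhs = sub nsw %iv, %c1; icmp slt %lhs, %c2
//     becomes  preheader: %inv = add nsw %c1, %c2;  loop: icmp slt %iv, %inv
//   %lhs = sub nsw %c1, %iv; icmp slt %lhs, %c2
//     becomes  preheader: %inv = sub nsw %c1, %c2;  loop: icmp sgt %iv, %inv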
2625
2626/// Reassociate and hoist add/sub expressions.
2627static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2628 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2629 DominatorTree *DT) {
2630 using namespace PatternMatch;
2631 ICmpInst::Predicate Pred;
2632 Value *LHS, *RHS;
2633 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
2634 return false;
2635
2636 // TODO: Support unsigned predicates?
2637 if (!ICmpInst::isSigned(Pred))
2638 return false;
2639
2640 // Put variant operand to LHS position.
2641 if (L.isLoopInvariant(LHS)) {
2642 std::swap(LHS, RHS);
2643 Pred = ICmpInst::getSwappedPredicate(Pred);
2644 }
2645 // We want to delete the initial operation after reassociation, so only do it
2646 // if it has no other uses.
2647 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
2648 return false;
2649
2650 // TODO: We could go with smarter context, taking common dominator of all I's
2651 // users instead of I itself.
2652 if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2653 return true;
2654
2655 if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2656 return true;
2657
2658 return false;
2659}
2660
2661/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2662/// A1, A2, ... and C are loop invariants into expressions like
2663/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2664/// invariant expressions. This function returns true only if any hoisting has
2665/// actually occurred.
2666static bool hoistFPAssociation(Instruction &I, Loop &L,
2667 ICFLoopSafetyInfo &SafetyInfo,
2668 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2669 DominatorTree *DT) {
2670 using namespace PatternMatch;
2671 Value *VariantOp = nullptr, *InvariantOp = nullptr;
2672
2673 if (!match(&I, m_FMul(m_Value(VariantOp), m_Value(InvariantOp))) ||
2674 !I.hasAllowReassoc() || !I.hasNoSignedZeros())
2675 return false;
2676 if (L.isLoopInvariant(VariantOp))
2677 std::swap(VariantOp, InvariantOp);
2678 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2679 return false;
2680 Value *Factor = InvariantOp;
2681
2682 // First, we need to make sure we should do the transformation.
2683 SmallVector<Use *> Changes;
2684 SmallVector<BinaryOperator *> Worklist;
2685 if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2686 Worklist.push_back(VariantBinOp);
2687 while (!Worklist.empty()) {
2688 BinaryOperator *BO = Worklist.pop_back_val();
2689 if (!BO->hasOneUse() || !BO->hasAllowReassoc() || !BO->hasNoSignedZeros())
2690 return false;
2691 BinaryOperator *Op0, *Op1;
2692 if (match(BO, m_FAdd(m_BinOp(Op0), m_BinOp(Op1)))) {
2693 Worklist.push_back(Op0);
2694 Worklist.push_back(Op1);
2695 continue;
2696 }
2697 if (BO->getOpcode() != Instruction::FMul || L.isLoopInvariant(BO))
2698 return false;
2699 Use &U0 = BO->getOperandUse(0);
2700 Use &U1 = BO->getOperandUse(1);
2701 if (L.isLoopInvariant(U0))
2702 Changes.push_back(&U0);
2703 else if (L.isLoopInvariant(U1))
2704 Changes.push_back(&U1);
2705 else
2706 return false;
2707 if (Changes.size() > FPAssociationUpperLimit)
2708 return false;
2709 }
2710 if (Changes.empty())
2711 return false;
2712
2713 // We know we should do it so let's do the transformation.
2714 auto *Preheader = L.getLoopPreheader();
2715 assert(Preheader && "Loop is not in simplify form?");
2716 IRBuilder<> Builder(Preheader->getTerminator());
2717 for (auto *U : Changes) {
2718 assert(L.isLoopInvariant(U->get()));
2719 Instruction *Ins = cast<Instruction>(U->getUser());
2720 U->set(Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul"));
2721 }
2722 I.replaceAllUsesWith(VariantOp);
2723 eraseInstruction(I, SafetyInfo, MSSAU);
2724 return true;
2725}
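// For illustration, a minimal sketch in C-like pseudocode (hypothetical
// names; a1, a2, c loop-invariant, b1, b2 loop-variant), under the
// reassoc/nsz fast-math flags required above:
//
//   for (...) r = (a1*b1 + a2*b2) * c;
// becomes
//   f1 = a1*c; f2 = a2*c;         // hoisted to the preheader
//   for (...) r = f1*b1 + f2*b2;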
2726
2727static bool hoistArithmetics(Instruction &I, Loop &L,
2728 ICFLoopSafetyInfo &SafetyInfo,
2729 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2730 DominatorTree *DT) {
2731 // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
2732 // into (x < min(INV1, INV2)), and hoisting the invariant part of this
2733 // expression out of the loop.
2734 if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
2735 ++NumHoisted;
2736 ++NumMinMaxHoisted;
2737 return true;
2738 }
2739
2740 // Try to hoist GEPs by reassociation.
2741 if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
2742 ++NumHoisted;
2743 ++NumGEPsHoisted;
2744 return true;
2745 }
2746
2747 // Try to hoist add/sub's by reassociation.
2748 if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
2749 ++NumHoisted;
2750 ++NumAddSubHoisted;
2751 return true;
2752 }
2753
2754 if (hoistFPAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2755 ++NumHoisted;
2756 ++NumFPAssociationsHoisted;
2757 return true;
2758 }
2759
2760 return false;
2761}
2762
2763/// Little predicate that returns true if the specified basic block is in
2764/// a subloop of the current one, not the current one itself.
2765///
2766static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2767 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2768 return LI->getLoopFor(BB) != CurLoop;
2769}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ NonNegative
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
Rewrite Partial Register Uses
#define DEBUG_TYPE
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, TargetTransformInfo *TTI, bool &FoldableInLoop, bool LoopNestMode)
Return true if the only users of this instruction are outside of the loop.
Definition: LICM.cpp:1372
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if this allows hoisting the inner ...
Definition: LICM.cpp:2468
static cl::opt< bool > SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false), cl::desc("Force thread model single in LICM pass"))
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, LoopInfo *LI, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU)
Definition: LICM.cpp:1543
static cl::opt< unsigned > FPAssociationUpperLimit("licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop, const TargetTransformInfo *TTI)
Return true if the instruction is foldable in the loop.
Definition: LICM.cpp:1342
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A < min(INV_1, INV_2)),...
Definition: LICM.cpp:2394
static Instruction * cloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1414
static cl::opt< bool > ControlFlowHoisting("licm-control-flow-hoisting", cl::Hidden, cl::init(false), cl::desc("Enable control flow (and PHI) hoisting in LICM"))
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, Loop *CurLoop, Instruction &I, SinkAndHoistLICMFlags &Flags, bool InvariantGroup)
Definition: LICM.cpp:2331
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to turn things like "LV + C1 < C2" into "LV < C2 - C1".
Definition: LICM.cpp:2519
static MemoryAccess * getClobberingMemoryAccess(MemorySSA &MSSA, BatchAAResults &BAA, SinkAndHoistLICMFlags &Flags, MemoryUseOrDef *MA)
Definition: LICM.cpp:1134
static SmallVector< PointersAndHasReadsOutsideSet, 0 > collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)
Definition: LICM.cpp:2269
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
When an instruction is found to only use loop invariant operands that is safe to hoist,...
Definition: LICM.cpp:1722
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition: LICM.cpp:1526
static void moveInstructionBefore(Instruction &I, Instruction &Dest, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE)
Definition: LICM.cpp:1494
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition: LICM.cpp:1615
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate and hoist add/sub expressions.
Definition: LICM.cpp:2627
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition: LICM.cpp:2257
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition: LICM.cpp:1040
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1508
licm
Definition: LICM.cpp:367
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition: LICM.cpp:2766
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate and hoist the following two patterns: LV - C1 < C2 --> LV < C1 + C2,...
Definition: LICM.cpp:2566
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1487
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition: LICM.cpp:1767
static bool hoistArithmetics(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Aggregates various functions for hoisting computations out of loop.
Definition: LICM.cpp:2727
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition: LICM.cpp:1333
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition: LICM.cpp:203
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition: LICM.cpp:2382
static bool hoistFPAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where A1, A2,...
Definition: LICM.cpp:2666
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Loop Invariant Code Motion
Memory SSA
Definition: MemorySSA.cpp:71
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
#define P(N)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:59
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
This file provides a priority worklist.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
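As an illustrative sketch (the pass name and counter are hypothetical, not from LICM.cpp), a STATISTIC counter is declared once and bumped wherever the event occurs:

    #include "llvm/ADT/Statistic.h"
    #define DEBUG_TYPE "mypass" // hypothetical; must be defined before STATISTIC is used
    STATISTIC(NumFoosFolded, "Number of foos folded"); // hypothetical counter
    // Bump the counter wherever the transformation fires:
    static void noteFold() { ++NumFoosFolded; }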
This pass exposes codegen information to IR-level passes.
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
void add(Value *Ptr, LocationSize Size, const AAMDNodes &AAInfo)
These methods are used to add different types of instructions to the alias sets.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:649
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
Definition: BasicBlock.cpp:692
iterator end()
Definition: BasicBlock.h:450
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:437
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:446
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:399
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:519
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:173
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:207
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition: BasicBlock.h:365
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:228
bool canSplitPredecessors() const
Definition: BasicBlock.cpp:575
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:328
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
BinaryOps getOpcode() const
Definition: InstrTypes.h:391
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1394
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:841
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:748
bool isSigned() const
Definition: InstrTypes.h:998
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:900
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:862
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:927
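A minimal sketch of ConstantFP::get (Ty is an assumed floating-point Type*; for a vector type the result is a splat):

    #include "llvm/IR/Constants.h"
    using namespace llvm;
    // Sketch: materialize the constant 1.0 of the given FP (or FP vector) type.
    static Constant *makeOne(Type *Ty) { return ConstantFP::get(Ty, 1.0); }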
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
bool isNegative() const
Definition: Constants.h:192
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:151
Assignment ID.
static DILocation * getMergedLocations(ArrayRef< DILocation * > Locs)
Try to combine the vector of locations passed as input into a single one.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:472
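A hedged sketch of querying a store size while guarding against scalable vector types (the helper name and the 0-for-scalable fallback are choices of this sketch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    #include <cstdint>
    using namespace llvm;
    // Sketch: fixed store size in bytes, or 0 if the type is scalable.
    static uint64_t fixedStoreBytes(const DataLayout &DL, Type *Ty) {
      TypeSize Size = DL.getTypeStoreSize(Ty);
      return Size.isScalable() ? 0 : Size.getFixedValue();
    }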
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:278
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:313
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:322
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:123
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
Definition: MustExecute.h:132
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
Definition: MustExecute.cpp:99
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is computed contains an instruction that may throw ...
Definition: MustExecute.cpp:75
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop: checks the loop body & header for the possibility of may-throw exceptions...
Definition: MustExecute.cpp:79
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
Definition: MustExecute.cpp:93
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Value * CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1595
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2508
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1338
CallInst * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:921
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1321
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Definition: IRBuilder.h:1865
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2324
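Tying several IRBuilder entries above together, a sketch in the spirit of this file's reassociation helpers (the function and value names are hypothetical):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;
    // Sketch: emit "(LV + C) <s Bound" just before the preheader's terminator.
    static Value *emitInvariantCheck(BasicBlock *Preheader, Value *LV,
                                     Value *C, Value *Bound) {
      IRBuilder<> Builder(Preheader->getTerminator());
      Value *Sum = Builder.CreateAdd(LV, C, "inv.sum",
                                     /*HasNUW=*/false, /*HasNSW=*/true);
      return Builder.CreateICmp(ICmpInst::ICMP_SLT, Sum, Bound, "inv.cmp");
    }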
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2639
void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
Definition: DebugInfo.cpp:898
bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:98
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:71
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1666
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:786
const BasicBlock * getParent() const
Definition: Instruction.h:139
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:136
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:346
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1581
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1652
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:435
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:308
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:285
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:318
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:349
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition: SSAUpdater.h:150
An instruction for reading from memory.
Definition: Instructions.h:177
void setAlignment(Align Align)
Definition: Instructions.h:224
Value * getPointerOperand()
Definition: Instructions.h:264
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:234
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
Definition: LoopIterator.h:180
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:591
bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition: LoopInfo.cpp:932
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition: MustExecute.h:60
void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
Definition: MustExecute.cpp:36
const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
Definition: MustExecute.cpp:32
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition: LoopInfo.cpp:66
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition: LoopInfo.cpp:60
BasicBlock * getBlock() const
Definition: MemorySSA.h:164
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory referenced by the given instruction.
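A sketch of the usual pairing of MemoryLocation::get with alias analysis (the helper name is hypothetical):

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;
    // Sketch: may the call write the memory this load reads?
    static bool callMayClobberLoad(AAResults &AA, const CallBase *Call,
                                   const LoadInst *LI) {
      MemoryLocation Loc = MemoryLocation::get(LI);
      return isModSet(AA.getModRefInfo(Call, Loc));
    }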
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:923
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1035
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:975
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:700
AliasAnalysis & getAA()
Definition: MemorySSA.h:797
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:757
MemorySSAWalker * getSkipSelfWalker()
Definition: MemorySSA.cpp:1560
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2110
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Definition: MemorySSA.cpp:1857
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:717
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:765
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
Definition: MemorySSA.cpp:2079
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:737
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:252
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:262
Represents read-only accesses to memory.
Definition: MemorySSA.h:312
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:275
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
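An illustrative sketch of the PHINode API above (names are hypothetical; MergeBB is assumed non-empty so the PHI lands at its top):

    #include "llvm/IR/Instructions.h"
    using namespace llvm;
    // Sketch: merge two same-typed definitions at the start of MergeBB.
    static PHINode *mergeValues(BasicBlock *MergeBB, Value *A, BasicBlock *FromA,
                                Value *B, BasicBlock *FromB) {
      PHINode *PN = PHINode::Create(A->getType(), /*NumReservedValues=*/2,
                                    "merge", &MergeBB->front());
      PN->addIncoming(A, FromA);
      PN->addIncoming(B, FromB);
      return PN;
    }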
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
PointerIntPair - This class implements a pair of a pointer and small integer.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1743
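A sketch of the common idiom of replacing a dead value's uses with poison before erasing it (the helper name is hypothetical):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;
    // Sketch: detach all users of an instruction that is about to be deleted.
    static void detachFromUsers(Instruction &I) {
      if (!I.use_empty())
        I.replaceAllUsesWith(PoisonValue::get(I.getType()));
    }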
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB) const
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:172
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:178
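A hedged skeleton of how a new-PM loop pass reports preservation (MyLoopPass is hypothetical; compare the run entries for the LICM passes above):

    #include "llvm/Analysis/LoopAnalysisManager.h"
    #include "llvm/Transforms/Scalar/LoopPassManager.h"
    using namespace llvm;
    struct MyLoopPass : PassInfoMixin<MyLoopPass> {
      PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                            LoopStandardAnalysisResults &AR, LPMUpdater &U) {
        bool Changed = false; // ... transform the loop, set Changed ...
        if (!Changed)
          return PreservedAnalyses::all();     // nothing was invalidated
        return getLoopPassPreservedAnalyses(); // standard loop-pass set
      }
    };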
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:40
The main scalar evolution driver.
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:188
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
Flags controlling how much is checked when sinking or hoisting instructions.
Definition: LoopUtils.h:118
SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition: LICM.cpp:377
unsigned LicmMssaNoAccForPromotionCap
Definition: LoopUtils.h:137
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type size() const
Definition: SmallPtrSet.h:93
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:380
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
iterator begin() const
Definition: SmallPtrSet.h:404
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:390
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
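A brief sketch of the insert/contains protocol above (the helper is illustrative):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/IR/BasicBlock.h"
    using namespace llvm;
    // Sketch: a visited set doubling as a "first time here?" check;
    // insert().second is true only on the first insertion.
    static bool visitOnce(SmallPtrSetImpl<const BasicBlock *> &Visited,
                          const BasicBlock *BB) {
      return Visited.insert(BB).second;
    }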
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
void setAlignment(Align Align)
Definition: Instructions.h:357
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:368
static unsigned getPointerOperandIndex()
Definition: Instructions.h:403
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
const Use & getOperandUse(unsigned i) const
Definition: User.h:182
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
std::string getNameOrAsOperand() const
Definition: Value.cpp:445
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
bool use_empty() const
Definition: Value.h:344
user_iterator_impl< User > user_iterator
Definition: Value.h:390
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:188
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:84
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
Definition: PatternMatch.h:988
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
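A sketch of the PatternMatch combinators listed above, in the spirit of this file's reassociation code (the matcher function is hypothetical):

    #include "llvm/IR/PatternMatch.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;
    // Sketch: recognize "icmp pred (sub nsw A, B), C" where the sub has one use.
    static bool matchSubCmp(Value *V, ICmpInst::Predicate &Pred, Value *&A,
                            Value *&B, Value *&C) {
      return match(V, m_ICmp(Pred,
                             m_OneUse(m_NSWSub(m_Value(A), m_Value(B))),
                             m_Value(C)));
    }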
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
SmallVector< DomTreeNode *, 16 > collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
Definition: LoopUtils.cpp:450
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:102
@ NeverOverflows
Never overflows.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1726
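A one-function sketch of the range-based all_of above (equivalent in spirit to Loop::hasLoopInvariantOperands):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/Analysis/LoopInfo.h"
    using namespace llvm;
    // Sketch: are all operands of I invariant in CurLoop?
    static bool allOperandsInvariant(const Loop *CurLoop, const Instruction &I) {
      return all_of(I.operands(),
                    [&](Value *Op) { return CurLoop->isLoopInvariant(Op); });
    }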
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if it is legal to hoist or sink this instruction disregarding the possible introduction of...
Definition: LICM.cpp:1148
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B. Identical to set_intersection, except that it works on set<>...
Definition: SetOperations.h:40
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1394
void initializeLegacyLICMPassPass(PassRegistry &)
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to that requested.
Definition: Loads.cpp:199
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition: LCSSA.cpp:425
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:99
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:665
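A sketch of the classic erase-while-iterating use of make_early_inc_range (pairs with isInstructionTriviallyDead below; the helper name is illustrative):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/Transforms/Utils/Local.h"
    using namespace llvm;
    // Sketch: delete trivially dead instructions without invalidating the
    // iterator being walked.
    static void dropDeadInstructions(BasicBlock &BB, const TargetLibraryInfo *TLI) {
      for (Instruction &I : make_early_inc_range(BB))
        if (isInstructionTriviallyDead(&I, TLI))
          I.eraseFromParent();
    }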
Pass * createLICMPass()
Definition: LICM.cpp:370
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:865
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1733
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:399
bool isGuard(const User *U)
Returns true iff U has the semantics of a guard, expressed in the form of a call to llvm.experimental....
Definition: GuardUtils.cpp:18
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
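dbgs() is almost always wrapped in LLVM_DEBUG so output is gated on asserts builds and -debug-only=<DEBUG_TYPE>; a sketch (block and instruction names are illustrative):

    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/Debug.h"
    #define DEBUG_TYPE "licm"
    using namespace llvm;
    static void debugHoist(const BasicBlock *Dest, const Instruction &I) {
      // Prints only when -debug-only=licm is active in an asserts build.
      LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": "
                        << I << "\n");
    }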
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool isModOrRefSet(const ModRefInfo MRI)
Definition: ModRef.h:42
bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
Definition: LoopUtils.cpp:141
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: ModRef.h:27
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:83
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
Definition: LoopUtils.cpp:348
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
Definition: LoopUtils.cpp:1609
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function level.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition: Loads.cpp:219
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1853
PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2020
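A sketch pairing erase_if with a pending-work list (the worklist and predicate are illustrative):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;
    // Sketch: drop instructions that became dead from a pending worklist.
    static void pruneWorklist(SmallVectorImpl<Instruction *> &Worklist) {
      erase_if(Worklist, [](Instruction *I) { return I->use_empty(); });
    }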
auto predecessors(const MachineBasicBlock *BB)
bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:544
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
unsigned pred_size(const MachineBasicBlock *BB)
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< BasicBlock::iterator > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition: LICM.cpp:1949
cl::opt< unsigned > SetLicmMssaOptCap
bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition: LICM.cpp:612
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:738
AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
unsigned MssaOptCap
Definition: LICM.h:49
unsigned MssaNoAccForPromotionCap
Definition: LICM.h:50
bool AllowSpeculation
Definition: LICM.h:51
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1122
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1150
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:391