// Source listing of LICM.cpp, extracted from the LLVM 20.0.0git
// generated documentation.
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//

42#include "llvm/ADT/Statistic.h"
49#include "llvm/Analysis/Loads.h"
62#include "llvm/IR/CFG.h"
63#include "llvm/IR/Constants.h"
64#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/Dominators.h"
70#include "llvm/IR/IRBuilder.h"
71#include "llvm/IR/LLVMContext.h"
72#include "llvm/IR/Metadata.h"
77#include "llvm/Support/Debug.h"
86#include <algorithm>
87#include <utility>
88using namespace llvm;
89
90namespace llvm {
91class LPMUpdater;
92} // namespace llvm
93
94#define DEBUG_TYPE "licm"
95
96STATISTIC(NumCreatedBlocks, "Number of blocks created");
97STATISTIC(NumClonedBranches, "Number of branches cloned");
98STATISTIC(NumSunk, "Number of instructions sunk out of loop");
99STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
100STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
101STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
102STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
103STATISTIC(NumLoadPromoted, "Number of load-only promotions");
104STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
105STATISTIC(NumMinMaxHoisted,
106 "Number of min/max expressions hoisted out of the loop");
107STATISTIC(NumGEPsHoisted,
108 "Number of geps reassociated and hoisted out of the loop");
109STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
110 "and hoisted out of the loop");
111STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
112 "reassociated and hoisted out of the loop");
113STATISTIC(NumIntAssociationsHoisted,
114 "Number of invariant int expressions "
115 "reassociated and hoisted out of the loop");
116STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
117 "reassociated and hoisted out of the loop");
118
119/// Memory promotion is enabled by default.
120static cl::opt<bool>
121 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
122 cl::desc("Disable memory promotion in LICM pass"));
123
125 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
126 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
127
128static cl::opt<bool>
129 SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
130 cl::desc("Force thread model single in LICM pass"));
131
133 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
134 cl::desc("Max num uses visited for identifying load "
135 "invariance in loop using invariant start (default = 8)"));
136
138 "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
139 cl::desc(
140 "Set upper limit for the number of transformations performed "
141 "during a single round of hoisting the reassociated expressions."));
142
144 "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
145 cl::desc(
146 "Set upper limit for the number of transformations performed "
147 "during a single round of hoisting the reassociated expressions."));
148
149// Experimental option to allow imprecision in LICM in pathological cases, in
150// exchange for faster compile. This is to be removed if MemorySSA starts to
151// address the same issue. LICM calls MemorySSAWalker's
152// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
153// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
154// which may not be precise, since optimizeUses is capped. The result is
155// correct, but we may not get as "far up" as possible to get which access is
156// clobbering the one queried.
158 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
159 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
160 "for faster compile. Caps the MemorySSA clobbering calls."));
161
162// Experimentally, memory promotion carries less importance than sinking and
163// hoisting. Limit when we do promotion when using MemorySSA, in order to save
164// compile time.
166 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
167 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
168 "effect. When MSSA in LICM is enabled, then this is the maximum "
169 "number of accesses allowed to be present in a loop in order to "
170 "enable memory promotion."));
171
172static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
173static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
174 const LoopSafetyInfo *SafetyInfo,
176 bool &FoldableInLoop, bool LoopNestMode);
177static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
178 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
181static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
182 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
185 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
186 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
187 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
188 AssumptionCache *AC, bool AllowSpeculation);
189static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
190 Loop *CurLoop, Instruction &I,
192 bool InvariantGroup);
193static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
194 MemoryUse &MU);
195/// Aggregates various functions for hoisting computations out of loop.
196static bool hoistArithmetics(Instruction &I, Loop &L,
197 ICFLoopSafetyInfo &SafetyInfo,
199 DominatorTree *DT);
201 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
202 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);
203
204static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
205 MemorySSAUpdater &MSSAU);
206
208 ICFLoopSafetyInfo &SafetyInfo,
210
211static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
212 function_ref<void(Instruction *)> Fn);
214 std::pair<SmallSetVector<Value *, 8>, bool>;
217
218namespace {
219struct LoopInvariantCodeMotion {
220 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
223 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
224
225 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
226 unsigned LicmMssaNoAccForPromotionCap,
227 bool LicmAllowSpeculation)
228 : LicmMssaOptCap(LicmMssaOptCap),
229 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
230 LicmAllowSpeculation(LicmAllowSpeculation) {}
231
232private:
233 unsigned LicmMssaOptCap;
234 unsigned LicmMssaNoAccForPromotionCap;
235 bool LicmAllowSpeculation;
236};
237
238struct LegacyLICMPass : public LoopPass {
239 static char ID; // Pass identification, replacement for typeid
240 LegacyLICMPass(
241 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
242 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
243 bool LicmAllowSpeculation = true)
244 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
245 LicmAllowSpeculation) {
247 }
248
249 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
250 if (skipLoop(L))
251 return false;
252
253 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
254 << L->getHeader()->getNameOrAsOperand() << "\n");
255
256 Function *F = L->getHeader()->getParent();
257
258 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
259 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
260 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
261 // pass. Function analyses need to be preserved across loop transformations
262 // but ORE cannot be preserved (see comment before the pass definition).
263 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
264 return LICM.runOnLoop(
265 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
266 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
267 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
268 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
269 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
270 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
271 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
272 }
273
274 /// This transformation requires natural loop information & requires that
275 /// loop preheaders be inserted into the CFG...
276 ///
277 void getAnalysisUsage(AnalysisUsage &AU) const override {
289 }
290
291private:
292 LoopInvariantCodeMotion LICM;
293};
294} // namespace
295
298 if (!AR.MSSA)
299 report_fatal_error("LICM requires MemorySSA (loop-mssa)",
300 /*GenCrashDiag*/false);
301
302 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
303 // pass. Function analyses need to be preserved across loop transformations
304 // but ORE cannot be preserved (see comment before the pass definition).
305 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
306
307 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
308 Opts.AllowSpeculation);
309 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
310 &AR.SE, AR.MSSA, &ORE))
311 return PreservedAnalyses::all();
312
314 PA.preserve<MemorySSAAnalysis>();
315
316 return PA;
317}
318
320 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
321 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
322 OS, MapClassName2PassName);
323
324 OS << '<';
325 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
326 OS << '>';
327}
328
331 LPMUpdater &) {
332 if (!AR.MSSA)
333 report_fatal_error("LNICM requires MemorySSA (loop-mssa)",
334 /*GenCrashDiag*/false);
335
336 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
337 // pass. Function analyses need to be preserved across loop transformations
338 // but ORE cannot be preserved (see comment before the pass definition).
340
341 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
342 Opts.AllowSpeculation);
343
344 Loop &OutermostLoop = LN.getOutermostLoop();
345 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
346 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
347
348 if (!Changed)
349 return PreservedAnalyses::all();
350
352
353 PA.preserve<DominatorTreeAnalysis>();
354 PA.preserve<LoopAnalysis>();
355 PA.preserve<MemorySSAAnalysis>();
356
357 return PA;
358}
359
361 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
362 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
363 OS, MapClassName2PassName);
364
365 OS << '<';
366 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
367 OS << '>';
368}
369
370char LegacyLICMPass::ID = 0;
371INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
372 false, false)
378INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
379 false)
380
381Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
382
384 MemorySSA &MSSA)
386 IsSink, L, MSSA) {}
387
389 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
390 Loop &L, MemorySSA &MSSA)
391 : LicmMssaOptCap(LicmMssaOptCap),
392 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
393 IsSink(IsSink) {
394 unsigned AccessCapCount = 0;
395 for (auto *BB : L.getBlocks())
396 if (const auto *Accesses = MSSA.getBlockAccesses(BB))
397 for (const auto &MA : *Accesses) {
398 (void)MA;
399 ++AccessCapCount;
400 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
401 NoOfMemAccTooLarge = true;
402 return;
403 }
404 }
405}
406
407/// Hoist expressions out of the specified loop. Note, alias info for inner
408/// loop is not preserved so it is not a good idea to run LICM multiple
409/// times on one loop.
410bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
414 ScalarEvolution *SE, MemorySSA *MSSA,
416 bool LoopNestMode) {
417 bool Changed = false;
418
419 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
420
421 // If this loop has metadata indicating that LICM is not to be performed then
422 // just exit.
424 return false;
425 }
426
427 // Don't sink stores from loops with coroutine suspend instructions.
428 // LICM would sink instructions into the default destination of
429 // the coroutine switch. The default destination of the switch is to
430 // handle the case where the coroutine is suspended, by which point the
431 // coroutine frame may have been destroyed. No instruction can be sunk there.
432 // FIXME: This would unfortunately hurt the performance of coroutines, however
433 // there is currently no general solution for this. Similar issues could also
434 // potentially happen in other passes where instructions are being moved
435 // across that edge.
436 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
437 return llvm::any_of(*BB, [](Instruction &I) {
438 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
439 return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
440 });
441 });
442
443 MemorySSAUpdater MSSAU(MSSA);
444 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
445 /*IsSink=*/true, *L, *MSSA);
446
447 // Get the preheader block to move instructions into...
448 BasicBlock *Preheader = L->getLoopPreheader();
449
450 // Compute loop safety information.
451 ICFLoopSafetyInfo SafetyInfo;
452 SafetyInfo.computeLoopSafetyInfo(L);
453
454 // We want to visit all of the instructions in this loop... that are not parts
455 // of our subloops (they have already had their invariants hoisted out of
456 // their loop, into this loop, so there is no need to process the BODIES of
457 // the subloops).
458 //
459 // Traverse the body of the loop in depth first order on the dominator tree so
460 // that we are guaranteed to see definitions before we see uses. This allows
461 // us to sink instructions in one pass, without iteration. After sinking
462 // instructions, we perform another pass to hoist them out of the loop.
463 if (L->hasDedicatedExits())
464 Changed |=
465 LoopNestMode
466 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
467 TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
468 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
469 MSSAU, &SafetyInfo, Flags, ORE);
470 Flags.setIsSink(false);
471 if (Preheader)
472 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
473 MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
474 LicmAllowSpeculation);
475
476 // Now that all loop invariants have been removed from the loop, promote any
477 // memory references to scalars that we can.
478 // Don't sink stores from loops without dedicated block exits. Exits
479 // containing indirect branches are not transformed by loop simplify,
480 // make sure we catch that. An additional load may be generated in the
481 // preheader for SSA updater, so also avoid sinking when no preheader
482 // is available.
483 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
484 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
485 // Figure out the loop exits and their insertion points
487 L->getUniqueExitBlocks(ExitBlocks);
488
489 // We can't insert into a catchswitch.
490 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
491 return isa<CatchSwitchInst>(Exit->getTerminator());
492 });
493
494 if (!HasCatchSwitch) {
496 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
497 InsertPts.reserve(ExitBlocks.size());
498 MSSAInsertPts.reserve(ExitBlocks.size());
499 for (BasicBlock *ExitBlock : ExitBlocks) {
500 InsertPts.push_back(ExitBlock->getFirstInsertionPt());
501 MSSAInsertPts.push_back(nullptr);
502 }
503
505
506 // Promoting one set of accesses may make the pointers for another set
507 // loop invariant, so run this in a loop.
508 bool Promoted = false;
509 bool LocalPromoted;
510 do {
511 LocalPromoted = false;
512 for (auto [PointerMustAliases, HasReadsOutsideSet] :
513 collectPromotionCandidates(MSSA, AA, L)) {
514 LocalPromoted |= promoteLoopAccessesToScalars(
515 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
516 DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
517 LicmAllowSpeculation, HasReadsOutsideSet);
518 }
519 Promoted |= LocalPromoted;
520 } while (LocalPromoted);
521
522 // Once we have promoted values across the loop body we have to
523 // recursively reform LCSSA as any nested loop may now have values defined
524 // within the loop used in the outer loop.
525 // FIXME: This is really heavy handed. It would be a bit better to use an
526 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
527 // it as it went.
528 if (Promoted)
529 formLCSSARecursively(*L, *DT, LI, SE);
530
531 Changed |= Promoted;
532 }
533 }
534
535 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
536 // specifically moving instructions across the loop boundary and so it is
537 // especially in need of basic functional correctness checking here.
538 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
539 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
540 "Parent loop not left in LCSSA form after LICM!");
541
542 if (VerifyMemorySSA)
543 MSSA->verifyMemorySSA();
544
545 if (Changed && SE)
547 return Changed;
548}
549
550/// Walk the specified region of the CFG (defined by all blocks dominated by
551/// the specified block, and that are in the current loop) in reverse depth
552/// first order w.r.t the DominatorTree. This allows us to visit uses before
553/// definitions, allowing us to sink a loop body in one pass without iteration.
554///
557 TargetTransformInfo *TTI, Loop *CurLoop,
558 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
560 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
561
562 // Verify inputs.
563 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
564 CurLoop != nullptr && SafetyInfo != nullptr &&
565 "Unexpected input to sinkRegion.");
566
567 // We want to visit children before parents. We will enqueue all the parents
568 // before their children in the worklist and process the worklist in reverse
569 // order.
571
572 bool Changed = false;
573 for (DomTreeNode *DTN : reverse(Worklist)) {
574 BasicBlock *BB = DTN->getBlock();
575 // Only need to process the contents of this block if it is not part of a
576 // subloop (which would already have been processed).
577 if (inSubLoop(BB, CurLoop, LI))
578 continue;
579
580 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
581 Instruction &I = *--II;
582
583 // The instruction is not used in the loop if it is dead. In this case,
584 // we just delete it instead of sinking it.
585 if (isInstructionTriviallyDead(&I, TLI)) {
586 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
589 ++II;
590 eraseInstruction(I, *SafetyInfo, MSSAU);
591 Changed = true;
592 continue;
593 }
594
595 // Check to see if we can sink this instruction to the exit blocks
596 // of the loop. We can do this if the all users of the instruction are
597 // outside of the loop. In this case, it doesn't even matter if the
598 // operands of the instruction are loop invariant.
599 //
600 bool FoldableInLoop = false;
601 bool LoopNestMode = OutermostLoop != nullptr;
602 if (!I.mayHaveSideEffects() &&
603 isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
604 SafetyInfo, TTI, FoldableInLoop,
605 LoopNestMode) &&
606 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
607 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
608 if (!FoldableInLoop) {
609 ++II;
611 eraseInstruction(I, *SafetyInfo, MSSAU);
612 }
613 Changed = true;
614 }
615 }
616 }
617 }
618 if (VerifyMemorySSA)
619 MSSAU.getMemorySSA()->verifyMemorySSA();
620 return Changed;
621}
622
625 TargetTransformInfo *TTI, Loop *CurLoop,
626 MemorySSAUpdater &MSSAU,
627 ICFLoopSafetyInfo *SafetyInfo,
630
631 bool Changed = false;
633 Worklist.insert(CurLoop);
634 appendLoopsToWorklist(*CurLoop, Worklist);
635 while (!Worklist.empty()) {
636 Loop *L = Worklist.pop_back_val();
637 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
638 MSSAU, SafetyInfo, Flags, ORE, CurLoop);
639 }
640 return Changed;
641}
642
643namespace {
644// This is a helper class for hoistRegion to make it able to hoist control flow
645// in order to be able to hoist phis. The way this works is that we initially
646// start hoisting to the loop preheader, and when we see a loop invariant branch
647// we make note of this. When we then come to hoist an instruction that's
648// conditional on such a branch we duplicate the branch and the relevant control
649// flow, then hoist the instruction into the block corresponding to its original
650// block in the duplicated control flow.
651class ControlFlowHoister {
652private:
653 // Information about the loop we are hoisting from
654 LoopInfo *LI;
655 DominatorTree *DT;
656 Loop *CurLoop;
657 MemorySSAUpdater &MSSAU;
658
659 // A map of blocks in the loop to the block their instructions will be hoisted
660 // to.
661 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
662
663 // The branches that we can hoist, mapped to the block that marks a
664 // convergence point of their control flow.
665 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
666
667public:
668 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
669 MemorySSAUpdater &MSSAU)
670 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
671
672 void registerPossiblyHoistableBranch(BranchInst *BI) {
673 // We can only hoist conditional branches with loop invariant operands.
674 if (!ControlFlowHoisting || !BI->isConditional() ||
675 !CurLoop->hasLoopInvariantOperands(BI))
676 return;
677
678 // The branch destinations need to be in the loop, and we don't gain
679 // anything by duplicating conditional branches with duplicate successors,
680 // as it's essentially the same as an unconditional branch.
681 BasicBlock *TrueDest = BI->getSuccessor(0);
682 BasicBlock *FalseDest = BI->getSuccessor(1);
683 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
684 TrueDest == FalseDest)
685 return;
686
687 // We can hoist BI if one branch destination is the successor of the other,
688 // or both have common successor which we check by seeing if the
689 // intersection of their successors is non-empty.
690 // TODO: This could be expanded to allowing branches where both ends
691 // eventually converge to a single block.
692 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
693 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
694 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
695 BasicBlock *CommonSucc = nullptr;
696 if (TrueDestSucc.count(FalseDest)) {
697 CommonSucc = FalseDest;
698 } else if (FalseDestSucc.count(TrueDest)) {
699 CommonSucc = TrueDest;
700 } else {
701 set_intersect(TrueDestSucc, FalseDestSucc);
702 // If there's one common successor use that.
703 if (TrueDestSucc.size() == 1)
704 CommonSucc = *TrueDestSucc.begin();
705 // If there's more than one pick whichever appears first in the block list
706 // (we can't use the value returned by TrueDestSucc.begin() as it's
707 // unpredicatable which element gets returned).
708 else if (!TrueDestSucc.empty()) {
709 Function *F = TrueDest->getParent();
710 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
711 auto It = llvm::find_if(*F, IsSucc);
712 assert(It != F->end() && "Could not find successor in function");
713 CommonSucc = &*It;
714 }
715 }
716 // The common successor has to be dominated by the branch, as otherwise
717 // there will be some other path to the successor that will not be
718 // controlled by this branch so any phi we hoist would be controlled by the
719 // wrong condition. This also takes care of avoiding hoisting of loop back
720 // edges.
721 // TODO: In some cases this could be relaxed if the successor is dominated
722 // by another block that's been hoisted and we can guarantee that the
723 // control flow has been replicated exactly.
724 if (CommonSucc && DT->dominates(BI, CommonSucc))
725 HoistableBranches[BI] = CommonSucc;
726 }
727
728 bool canHoistPHI(PHINode *PN) {
729 // The phi must have loop invariant operands.
730 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
731 return false;
732 // We can hoist phis if the block they are in is the target of hoistable
733 // branches which cover all of the predecessors of the block.
734 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
735 BasicBlock *BB = PN->getParent();
736 for (BasicBlock *PredBB : predecessors(BB))
737 PredecessorBlocks.insert(PredBB);
738 // If we have less predecessor blocks than predecessors then the phi will
739 // have more than one incoming value for the same block which we can't
740 // handle.
741 // TODO: This could be handled be erasing some of the duplicate incoming
742 // values.
743 if (PredecessorBlocks.size() != pred_size(BB))
744 return false;
745 for (auto &Pair : HoistableBranches) {
746 if (Pair.second == BB) {
747 // Which blocks are predecessors via this branch depends on if the
748 // branch is triangle-like or diamond-like.
749 if (Pair.first->getSuccessor(0) == BB) {
750 PredecessorBlocks.erase(Pair.first->getParent());
751 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
752 } else if (Pair.first->getSuccessor(1) == BB) {
753 PredecessorBlocks.erase(Pair.first->getParent());
754 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
755 } else {
756 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
757 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
758 }
759 }
760 }
761 // PredecessorBlocks will now be empty if for every predecessor of BB we
762 // found a hoistable branch source.
763 return PredecessorBlocks.empty();
764 }
765
766 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
768 return CurLoop->getLoopPreheader();
769 // If BB has already been hoisted, return that
770 if (HoistDestinationMap.count(BB))
771 return HoistDestinationMap[BB];
772
773 // Check if this block is conditional based on a pending branch
774 auto HasBBAsSuccessor =
776 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
777 Pair.first->getSuccessor(1) == BB);
778 };
779 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
780
781 // If not involved in a pending branch, hoist to preheader
782 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
783 if (It == HoistableBranches.end()) {
784 LLVM_DEBUG(dbgs() << "LICM using "
785 << InitialPreheader->getNameOrAsOperand()
786 << " as hoist destination for "
787 << BB->getNameOrAsOperand() << "\n");
788 HoistDestinationMap[BB] = InitialPreheader;
789 return InitialPreheader;
790 }
791 BranchInst *BI = It->first;
792 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
793 HoistableBranches.end() &&
794 "BB is expected to be the target of at most one branch");
795
796 LLVMContext &C = BB->getContext();
797 BasicBlock *TrueDest = BI->getSuccessor(0);
798 BasicBlock *FalseDest = BI->getSuccessor(1);
799 BasicBlock *CommonSucc = HoistableBranches[BI];
800 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
801
802 // Create hoisted versions of blocks that currently don't have them
803 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
804 if (HoistDestinationMap.count(Orig))
805 return HoistDestinationMap[Orig];
806 BasicBlock *New =
807 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
808 HoistDestinationMap[Orig] = New;
809 DT->addNewBlock(New, HoistTarget);
810 if (CurLoop->getParentLoop())
811 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
812 ++NumCreatedBlocks;
813 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
814 << " as hoist destination for " << Orig->getName()
815 << "\n");
816 return New;
817 };
818 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
819 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
820 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
821
822 // Link up these blocks with branches.
823 if (!HoistCommonSucc->getTerminator()) {
824 // The new common successor we've generated will branch to whatever that
825 // hoist target branched to.
826 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
827 assert(TargetSucc && "Expected hoist target to have a single successor");
828 HoistCommonSucc->moveBefore(TargetSucc);
829 BranchInst::Create(TargetSucc, HoistCommonSucc);
830 }
831 if (!HoistTrueDest->getTerminator()) {
832 HoistTrueDest->moveBefore(HoistCommonSucc);
833 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
834 }
835 if (!HoistFalseDest->getTerminator()) {
836 HoistFalseDest->moveBefore(HoistCommonSucc);
837 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
838 }
839
840 // If BI is being cloned to what was originally the preheader then
841 // HoistCommonSucc will now be the new preheader.
842 if (HoistTarget == InitialPreheader) {
843 // Phis in the loop header now need to use the new preheader.
844 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
846 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
847 // The new preheader dominates the loop header.
848 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
849 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
850 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
851 // The preheader hoist destination is now the new preheader, with the
852 // exception of the hoist destination of this branch.
853 for (auto &Pair : HoistDestinationMap)
854 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
855 Pair.second = HoistCommonSucc;
856 }
857
858 // Now finally clone BI.
860 HoistTarget->getTerminator(),
861 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
862 ++NumClonedBranches;
863
864 assert(CurLoop->getLoopPreheader() &&
865 "Hoisting blocks should not have destroyed preheader");
866 return HoistDestinationMap[BB];
867 }
868};
869} // namespace
870
871/// Walk the specified region of the CFG (defined by all blocks dominated by
872/// the specified block, and that are in the current loop) in depth first
873/// order w.r.t the DominatorTree. This allows us to visit definitions before
874/// uses, allowing us to hoist a loop body in one pass without iteration.
875///
878 TargetLibraryInfo *TLI, Loop *CurLoop,
880 ICFLoopSafetyInfo *SafetyInfo,
882 OptimizationRemarkEmitter *ORE, bool LoopNestMode,
883 bool AllowSpeculation) {
884 // Verify inputs.
885 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
886 CurLoop != nullptr && SafetyInfo != nullptr &&
887 "Unexpected input to hoistRegion.");
888
889 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
890
891 // Keep track of instructions that have been hoisted, as they may need to be
892 // re-hoisted if they end up not dominating all of their uses.
893 SmallVector<Instruction *, 16> HoistedInstructions;
894
895 // For PHI hoisting to work we need to hoist blocks before their successors.
896 // We can do this by iterating through the blocks in the loop in reverse
897 // post-order.
898 LoopBlocksRPO Worklist(CurLoop);
899 Worklist.perform(LI);
900 bool Changed = false;
901 BasicBlock *Preheader = CurLoop->getLoopPreheader();
902 for (BasicBlock *BB : Worklist) {
903 // Only need to process the contents of this block if it is not part of a
904 // subloop (which would already have been processed).
905 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
906 continue;
907
909 // Try hoisting the instruction out to the preheader. We can only do
910 // this if all of the operands of the instruction are loop invariant and
911 // if it is safe to hoist the instruction. We also check block frequency
912 // to make sure instruction only gets hoisted into colder blocks.
913 // TODO: It may be safe to hoist if we are hoisting to a conditional block
914 // and we have accurately duplicated the control flow from the loop header
915 // to that block.
916 if (CurLoop->hasLoopInvariantOperands(&I) &&
917 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
919 I, DT, TLI, CurLoop, SafetyInfo, ORE,
920 Preheader->getTerminator(), AC, AllowSpeculation)) {
921 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
922 MSSAU, SE, ORE);
923 HoistedInstructions.push_back(&I);
924 Changed = true;
925 continue;
926 }
927
928 // Attempt to remove floating point division out of the loop by
929 // converting it to a reciprocal multiplication.
930 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
931 CurLoop->isLoopInvariant(I.getOperand(1))) {
// x / d  ==>  x * (1 / d): materialize the reciprocal next to I, rewrite
// all uses to the multiply, then hoist only the (loop-invariant) divide.
// Legal only because the allow-reciprocal fast-math flag was checked above.
932 auto Divisor = I.getOperand(1);
933 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
934 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
935 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
936 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
937 ReciprocalDivisor->insertBefore(&I);
938 ReciprocalDivisor->setDebugLoc(I.getDebugLoc());
939
940 auto Product =
941 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
942 Product->setFastMathFlags(I.getFastMathFlags());
943 SafetyInfo->insertInstructionTo(Product, I.getParent());
944 Product->insertAfter(&I);
945 Product->setDebugLoc(I.getDebugLoc());
946 I.replaceAllUsesWith(Product);
947 eraseInstruction(I, *SafetyInfo, MSSAU);
948
949 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
950 SafetyInfo, MSSAU, SE, ORE);
951 HoistedInstructions.push_back(ReciprocalDivisor);
952 Changed = true;
953 continue;
954 }
955
956 auto IsInvariantStart = [&](Instruction &I) {
957 using namespace PatternMatch;
958 return I.use_empty() &&
959 match(&I, m_Intrinsic<Intrinsic::invariant_start>());
960 };
961 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
962 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
963 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
964 };
965 if ((IsInvariantStart(I) || isGuard(&I)) &&
966 CurLoop->hasLoopInvariantOperands(&I) &&
967 MustExecuteWithoutWritesBefore(I)) {
968 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
969 MSSAU, SE, ORE);
970 HoistedInstructions.push_back(&I);
971 Changed = true;
972 continue;
973 }
974
975 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
976 if (CFH.canHoistPHI(PN)) {
977 // Redirect incoming blocks first to ensure that we create hoisted
978 // versions of those blocks before we hoist the phi.
979 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
981 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
982 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
983 MSSAU, SE, ORE);
984 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
985 Changed = true;
986 continue;
987 }
988 }
989
990 // Try to reassociate instructions so that part of computations can be
991 // done out of loop.
992 if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
993 Changed = true;
994 continue;
995 }
996
997 // Remember possibly hoistable branches so we can actually hoist them
998 // later if needed.
999 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
1000 CFH.registerPossiblyHoistableBranch(BI);
1001 }
1002 }
1003
1004 // If we hoisted instructions to a conditional block they may not dominate
1005 // their uses that weren't hoisted (such as phis where some operands are not
1006 // loop invariant). If so make them unconditional by moving them to their
1007 // immediate dominator. We iterate through the instructions in reverse order
1008 // which ensures that when we rehoist an instruction we rehoist its operands,
1009 // and also keep track of where in the block we are rehoisting to make sure
1010 // that we rehoist instructions before the instructions that use them.
1011 Instruction *HoistPoint = nullptr;
1012 if (ControlFlowHoisting) {
1013 for (Instruction *I : reverse(HoistedInstructions)) {
1014 if (!llvm::all_of(I->uses(),
1015 [&](Use &U) { return DT->dominates(I, U); })) {
1016 BasicBlock *Dominator =
1017 DT->getNode(I->getParent())->getIDom()->getBlock();
1018 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1019 if (HoistPoint)
1020 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1021 "New hoist point expected to dominate old hoist point");
1022 HoistPoint = Dominator->getTerminator();
1023 }
1024 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1025 << HoistPoint->getParent()->getNameOrAsOperand()
1026 << ": " << *I << "\n");
1027 moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
1028 SE);
// Later rehoists are placed before this instruction, keeping defs above
// the (already rehoisted) instructions that use them.
1029 HoistPoint = I;
1030 Changed = true;
1031 }
1032 }
1033 }
1034 if (VerifyMemorySSA)
1035 MSSAU.getMemorySSA()->verifyMemorySSA();
1036
1037 // Now that we've finished hoisting make sure that LI and DT are still
1038 // valid.
1039#ifdef EXPENSIVE_CHECKS
1040 if (Changed) {
1041 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1042 "Dominator tree verification failed");
1043 LI->verify(*DT);
1044 }
1045#endif
1046
1047 return Changed;
1048}
1049
1050// Return true if LI is invariant within scope of the loop. LI is invariant if
1051// CurLoop is dominated by an invariant.start representing the same memory
1052// location and size as the memory location LI loads from, and also the
1053// invariant.start has no uses.
1055 Loop *CurLoop) {
1056 Value *Addr = LI->getPointerOperand();
1057 const DataLayout &DL = LI->getDataLayout();
1058 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1059
1060 // It is not currently possible for clang to generate an invariant.start
1061 // intrinsic with scalable vector types because we don't support thread local
1062 // sizeless types and we don't permit sizeless types in structs or classes.
1063 // Furthermore, even if support is added for this in future the intrinsic
1064 // itself is defined to have a size of -1 for variable sized objects. This
1065 // makes it impossible to verify if the intrinsic envelops our region of
1066 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1067 // types would have a -1 parameter, but the former is clearly double the size
1068 // of the latter.
1069 if (LocSizeInBits.isScalable())
1070 return false;
1071
1072 // If we've ended up at a global/constant, bail. We shouldn't be looking at
1073 // uselists for non-local Values in a loop pass.
1074 if (isa<Constant>(Addr))
1075 return false;
1076
1077 unsigned UsesVisited = 0;
1078 // Traverse all uses of the load operand value, to see if invariant.start is
1079 // one of the uses, and whether it dominates the load instruction.
1080 for (auto *U : Addr->users()) {
1081 // Avoid traversing for Load operand with high number of users.
1082 if (++UsesVisited > MaxNumUsesTraversed)
1083 return false;
1084 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1085 // If there are escaping uses of invariant.start instruction, the load maybe
1086 // non-invariant.
1087 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1088 !II->use_empty())
1089 continue;
1090 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1091 // The intrinsic supports having a -1 argument for variable sized objects
1092 // so we should check for that here.
1093 if (InvariantSize->isNegative())
1094 continue;
// The intrinsic's size argument is in bytes; scale by 8 so it can be
// compared against LocSizeInBits below.
1095 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1096 // Confirm the invariant.start location size contains the load operand size
1097 // in bits. Also, the invariant.start should dominate the load, and we
1098 // should not hoist the load out of a loop that contains this dominating
1099 // invariant.start.
1100 if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
1101 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1102 return true;
1103 }
1104
1105 return false;
1106}
1107
1108namespace {
1109/// Return true if-and-only-if we know how to (mechanically) both hoist and
1110/// sink a given instruction out of a loop. Does not address legality
1111/// concerns such as aliasing or speculation safety.
1112bool isHoistableAndSinkableInst(Instruction &I) {
1113 // Only these instructions are hoistable/sinkable.
1114 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
1115 isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1116 isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1117 isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1118 isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1119 isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1120 isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1121}
1122/// Return true if MSSA knows there are no MemoryDefs in the loop.
1123bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {
1124 for (auto *BB : L->getBlocks())
1125 if (MSSAU.getMemorySSA()->getBlockDefs(BB))
1126 return false;
1127 return true;
1128}
1129
1130/// Return true if I is the only Instruction with a MemoryAccess in L.
1131bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1132 const MemorySSAUpdater &MSSAU) {
1133 for (auto *BB : L->getBlocks())
1134 if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
1135 int NotAPhi = 0;
1136 for (const auto &Acc : *Accs) {
1137 if (isa<MemoryPhi>(&Acc))
1138 continue;
1139 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1140 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1141 return false;
1142 }
1143 }
1144 return true;
1145}
1146}
1147
1149 BatchAAResults &BAA,
1150 SinkAndHoistLICMFlags &Flags,
1151 MemoryUseOrDef *MA) {
1152 // See declaration of SetLicmMssaOptCap for usage details.
// Once the walk budget is spent, stop querying the walker and answer with
// MA's immediate defining access instead.
1153 if (Flags.tooManyClobberingCalls())
1154 return MA->getDefiningAccess();
1155
1156 MemoryAccess *Source =
// Each walker query counts against the clobbering-call budget checked above.
1158 Flags.incrementClobberingCalls();
1159 return Source;
1160}
1161
1163 Loop *CurLoop, MemorySSAUpdater &MSSAU,
1164 bool TargetExecutesOncePerLoop,
1165 SinkAndHoistLICMFlags &Flags,
1167 // If we don't understand the instruction, bail early.
1168 if (!isHoistableAndSinkableInst(I))
1169 return false;
1170
1171 MemorySSA *MSSA = MSSAU.getMemorySSA();
1172 // Loads have extra constraints we have to verify before we can hoist them.
1173 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1174 if (!LI->isUnordered())
1175 return false; // Don't sink/hoist volatile or ordered atomic loads!
1176
1177 // Loads from constant memory are always safe to move, even if they end up
1178 // in the same alias set as something that ends up being modified.
1179 if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
1180 return true;
1181 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1182 return true;
1183
1184 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1185 return false; // Don't risk duplicating unordered loads
1186
1187 // This checks for an invariant.start dominating the load.
1188 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1189 return true;
1190
// Ordered/volatile loads were rejected above, so this access is a plain
// MemoryUse.
1191 auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));
1192
1193 bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);
1194
1196 MSSA, MU, CurLoop, I, Flags, InvariantGroup);
1197 // Check loop-invariant address because this may also be a sinkable load
1198 // whose address is not necessarily loop-invariant.
1199 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1200 ORE->emit([&]() {
1202 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1203 << "failed to move load with loop-invariant address "
1204 "because the loop may invalidate its value";
1205 });
1206
1207 return !Invalidated;
1208 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1209 // Don't sink or hoist dbg info; it's legal, but not useful.
1210 if (isa<DbgInfoIntrinsic>(I))
1211 return false;
1212
1213 // Don't sink calls which can throw.
1214 if (CI->mayThrow())
1215 return false;
1216
1217 // Convergent attribute has been used on operations that involve
1218 // inter-thread communication which results are implicitly affected by the
1219 // enclosing control flows. It is not safe to hoist or sink such operations
1220 // across control flow.
1221 if (CI->isConvergent())
1222 return false;
1223
1224 // FIXME: Current LLVM IR semantics don't work well with coroutines and
1225 // thread local globals. We currently treat getting the address of a thread
1226 // local global as not accessing memory, even though it may not be a
1227 // constant throughout a function with coroutines. Remove this check after
1228 // we better model semantics of thread local globals.
1229 if (CI->getFunction()->isPresplitCoroutine())
1230 return false;
1231
1232 using namespace PatternMatch;
1233 if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1234 // Assumes don't actually alias anything or throw
1235 return true;
1236
1237 // Handle simple cases by querying alias analysis.
1238 MemoryEffects Behavior = AA->getMemoryEffects(CI);
1239
1240 if (Behavior.doesNotAccessMemory())
1241 return true;
1242 if (Behavior.onlyReadsMemory()) {
1243 // A readonly argmemonly function only reads from memory pointed to by
1244 // it's arguments with arbitrary offsets. If we can prove there are no
1245 // writes to this memory in the loop, we can hoist or sink.
1246 if (Behavior.onlyAccessesArgPointees()) {
1247 // TODO: expand to writeable arguments
// Every pointer argument must be free of in-loop clobbers for the call's
// MemoryUse; one clobbered argument blocks the move.
1248 for (Value *Op : CI->args())
1249 if (Op->getType()->isPointerTy() &&
1251 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1252 Flags, /*InvariantGroup=*/false))
1253 return false;
1254 return true;
1255 }
1256
1257 // If this call only reads from memory and there are no writes to memory
1258 // in the loop, we can hoist or sink the call as appropriate.
1259 if (isReadOnly(MSSAU, CurLoop))
1260 return true;
1261 }
1262
1263 // FIXME: This should use mod/ref information to see if we can hoist or
1264 // sink the call.
1265
1266 return false;
1267 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1268 // Fences alias (most) everything to provide ordering. For the moment,
1269 // just give up if there are any other memory operations in the loop.
1270 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1271 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1272 if (!SI->isUnordered())
1273 return false; // Don't sink/hoist volatile or ordered atomic store!
1274
1275 // We can only hoist a store that we can prove writes a value which is not
1276 // read or overwritten within the loop. For those cases, we fallback to
1277 // load store promotion instead. TODO: We can extend this to cases where
1278 // there is exactly one write to the location and that write dominates an
1279 // arbitrary number of reads in the loop.
1280 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1281 return true;
1282 // If there are more accesses than the Promotion cap, then give up as we're
1283 // not walking a list that long.
1284 if (Flags.tooManyMemoryAccesses())
1285 return false;
1286
// The MemoryDef MemorySSA created for SI; checked below both for in-loop
// clobbers and for dominance over every in-loop MemoryUse when hoisting.
1287 auto *SIMD = MSSA->getMemoryAccess(SI);
1288 BatchAAResults BAA(*AA);
1289 auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, SIMD);
1290 // Make sure there are no clobbers inside the loop.
1291 if (!MSSA->isLiveOnEntryDef(Source) &&
1292 CurLoop->contains(Source->getBlock()))
1293 return false;
1294
1295 // If there are interfering Uses (i.e. their defining access is in the
1296 // loop), or ordered loads (stored as Defs!), don't move this store.
1297 // Could do better here, but this is conservatively correct.
1298 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1299 // moving accesses. Can also extend to dominating uses.
1300 for (auto *BB : CurLoop->getBlocks())
1301 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1302 for (const auto &MA : *Accesses)
1303 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1304 auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
1305 const_cast<MemoryUse *>(MU));
1306 if (!MSSA->isLiveOnEntryDef(MD) &&
1307 CurLoop->contains(MD->getBlock()))
1308 return false;
1309 // Disable hoisting past potentially interfering loads. Optimized
1310 // Uses may point to an access outside the loop, as getClobbering
1311 // checks the previous iteration when walking the backedge.
1312 // FIXME: More precise: no Uses that alias SI.
1313 if (!Flags.getIsSink() && !MSSA->dominates(SIMD, MU))
1314 return false;
1315 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1316 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1317 (void)LI; // Silence warning.
1318 assert(!LI->isUnordered() && "Expected unordered load");
1319 return false;
1320 }
1321 // Any call, while it may not be clobbering SI, it may be a use.
1322 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1323 // Check if the call may read from the memory location written
1324 // to by SI. Check CI's attributes and arguments; the number of
1325 // such checks performed is limited above by NoOfMemAccTooLarge.
1327 if (isModOrRefSet(MRI))
1328 return false;
1329 }
1330 }
1331 }
1332 return true;
1333 }
1334
1335 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1336
1337 // We've established mechanical ability and aliasing, it's up to the caller
1338 // to check fault safety
1339 return true;
1340}
1341
1342/// Returns true if a PHINode is a trivially replaceable with an
1343/// Instruction.
1344/// This is true when all incoming values are that instruction.
1345/// This pattern occurs most often with LCSSA PHI nodes.
1346///
1347static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1348 for (const Value *IncValue : PN.incoming_values())
1349 if (IncValue != &I)
1350 return false;
1351
1352 return true;
1353}
1354
1355/// Return true if the instruction is foldable in the loop.
1356static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1357 const TargetTransformInfo *TTI) {
1358 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1359 InstructionCost CostI =
1361 if (CostI != TargetTransformInfo::TCC_Free)
1362 return false;
1363 // For a GEP, we cannot simply use getInstructionCost because currently
1364 // it optimistically assumes that a GEP will fold into addressing mode
1365 // regardless of its users.
1366 const BasicBlock *BB = GEP->getParent();
// Require every in-loop user to be a load/store in the GEP's own block, so
// the GEP can actually fold into its users' addressing modes.
1367 for (const User *U : GEP->users()) {
1368 const Instruction *UI = cast<Instruction>(U);
1369 if (CurLoop->contains(UI) &&
1370 (BB != UI->getParent() ||
1371 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1372 return false;
1373 }
1374 return true;
1375 }
1376
// Only GEPs are considered foldable by this heuristic.
1377 return false;
1378}
1379
1380/// Return true if the only users of this instruction are outside of
1381/// the loop. If this is true, we can sink the instruction to the exit
1382/// blocks of the loop.
1383///
1384/// We also return true if the instruction could be folded away in lowering.
1385/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1386static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1387 const LoopSafetyInfo *SafetyInfo,
1389 bool &FoldableInLoop, bool LoopNestMode) {
1390 const auto &BlockColors = SafetyInfo->getBlockColors();
1391 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
1392 for (const User *U : I.users()) {
1393 const Instruction *UI = cast<Instruction>(U);
1394 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1395 const BasicBlock *BB = PN->getParent();
1396 // We cannot sink uses in catchswitches.
1397 if (isa<CatchSwitchInst>(BB->getTerminator()))
1398 return false;
1399
1400 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1401 // phi use is too muddled.
1402 if (isa<CallInst>(I))
1403 if (!BlockColors.empty() &&
1404 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1405 return false;
1406
1407 if (LoopNestMode) {
// Look through chains of single-operand, single-user phis (typically
// LCSSA phis of inner loops) to find the ultimate user inside the nest.
1408 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1409 UI->getNumOperands() == 1) {
1410 if (!CurLoop->contains(UI))
1411 break;
1412 UI = cast<Instruction>(UI->user_back());
1413 }
1414 }
1415 }
1416
1417 if (CurLoop->contains(UI)) {
1418 if (IsFoldable) {
1419 FoldableInLoop = true;
1420 continue;
1421 }
1422 return false;
1423 }
1424 }
1425 return true;
1426}
1427
1429 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1430 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
1431 Instruction *New;
1432 if (auto *CI = dyn_cast<CallInst>(&I)) {
1433 const auto &BlockColors = SafetyInfo->getBlockColors();
1434
1435 // Sinking call-sites need to be handled differently from other
1436 // instructions. The cloned call-site needs a funclet bundle operand
1437 // appropriate for its location in the CFG.
1439 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1440 BundleIdx != BundleEnd; ++BundleIdx) {
1441 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
// Drop the original funclet bundle; the one appropriate for ExitBlock is
// re-added below from BlockColors.
1442 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1443 continue;
1444
1445 OpBundles.emplace_back(Bundle);
1446 }
1447
1448 if (!BlockColors.empty()) {
1449 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1450 assert(CV.size() == 1 && "non-unique color for exit block!");
1451 BasicBlock *BBColor = CV.front();
1452 Instruction *EHPad = BBColor->getFirstNonPHI();
1453 if (EHPad->isEHPad())
1454 OpBundles.emplace_back("funclet", EHPad);
1455 }
1456
1457 New = CallInst::Create(CI, OpBundles);
1458 New->copyMetadata(*CI);
1459 } else {
1460 New = I.clone();
1461 }
1462
1463 New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
// Tag the clone with a ".le" (loop-exit) suffix to distinguish it from I.
1464 if (!I.getName().empty())
1465 New->setName(I.getName() + ".le");
1466
1467 if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
1468 // Create a new MemoryAccess and let MemorySSA set its defining access.
1469 MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1470 New, nullptr, New->getParent(), MemorySSA::Beginning);
1471 if (NewMemAcc) {
1472 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1473 MSSAU.insertDef(MemDef, /*RenameUses=*/true);
1474 else {
1475 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1476 MSSAU.insertUse(MemUse, /*RenameUses=*/true);
1477 }
1478 }
1479 }
1480
1481 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1482 // this is particularly cheap because we can rip off the PHI node that we're
1483 // replacing for the number and blocks of the predecessors.
1484 // OPT: If this shows up in a profile, we can instead finish sinking all
1485 // invariant instructions, and then walk their operands to re-establish
1486 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1487 // sinking bottom-up.
1488 for (Use &Op : New->operands())
1489 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1490 auto *OInst = cast<Instruction>(Op.get());
1491 PHINode *OpPN =
1492 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1493 OInst->getName() + ".lcssa");
1494 OpPN->insertBefore(ExitBlock.begin());
1495 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1496 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1497 Op = OpPN;
1498 }
1499 return New;
1500}
1501
1503 MemorySSAUpdater &MSSAU) {
// Retire the side tables first (MemorySSA access, then implicit-CF safety
// info), and only then delete the IR instruction itself.
1504 MSSAU.removeMemoryAccess(&I);
1505 SafetyInfo.removeInstruction(&I);
1506 I.eraseFromParent();
1507}
1508
1510 ICFLoopSafetyInfo &SafetyInfo,
1511 MemorySSAUpdater &MSSAU,
1512 ScalarEvolution *SE) {
// Re-register I under its destination block in the safety info, then
// perform the actual IR move.
1513 SafetyInfo.removeInstruction(&I);
1514 SafetyInfo.insertInstructionTo(&I, Dest->getParent());
1515 I.moveBefore(*Dest->getParent(), Dest);
// If I has a MemorySSA access, relocate that access alongside it.
1516 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1517 MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1518 MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
1520 if (SE)
1522}
1523
1525 PHINode *TPN, Instruction *I, LoopInfo *LI,
1527 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1528 MemorySSAUpdater &MSSAU) {
1530 "Expect only trivially replaceable PHI");
1531 BasicBlock *ExitBlock = TPN->getParent();
1532 Instruction *New;
// At most one clone of I is created per exit block: reuse the cached clone
// from SunkCopies when this exit block has been visited before.
1533 auto It = SunkCopies.find(ExitBlock);
1534 if (It != SunkCopies.end())
1535 New = It->second;
1536 else
1537 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1538 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1539 return New;
1540}
1541
1542static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1543 BasicBlock *BB = PN->getParent();
1544 if (!BB->canSplitPredecessors())
1545 return false;
1546 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1547 // it require updating BlockColors for all offspring blocks accordingly. By
1548 // skipping such corner case, we can make updating BlockColors after splitting
1549 // predecessor fairly simple.
1550 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1551 return false;
1552 for (BasicBlock *BBPred : predecessors(BB)) {
1553 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1554 return false;
1555 }
1556 return true;
1557}
1558
1560 LoopInfo *LI, const Loop *CurLoop,
1561 LoopSafetyInfo *SafetyInfo,
1562 MemorySSAUpdater *MSSAU) {
1563#ifndef NDEBUG
1565 CurLoop->getUniqueExitBlocks(ExitBlocks);
1566 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1567 ExitBlocks.end());
1568#endif
1569 BasicBlock *ExitBB = PN->getParent();
1570 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1571
1572 // Split predecessors of the loop exit to make instructions in the loop are
1573 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1574 // loop in the canonical form where each predecessor of each exit block should
1575 // be contained within the loop. For example, this will convert the loop below
1576 // from
1577 //
1578 // LB1:
1579 // %v1 =
1580 // br %LE, %LB2
1581 // LB2:
1582 // %v2 =
1583 // br %LE, %LB1
1584 // LE:
1585 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1586 //
1587 // to
1588 //
1589 // LB1:
1590 // %v1 =
1591 // br %LE.split, %LB2
1592 // LB2:
1593 // %v2 =
1594 // br %LE.split2, %LB1
1595 // LE.split:
1596 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1597 // br %LE
1598 // LE.split2:
1599 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1600 // br %LE
1601 // LE:
1602 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1603 //
1604 const auto &BlockColors = SafetyInfo->getBlockColors();
1605 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1606 while (!PredBBs.empty()) {
1607 BasicBlock *PredBB = *PredBBs.begin();
1608 assert(CurLoop->contains(PredBB) &&
1609 "Expect all predecessors are in the loop");
// Only split predecessors that actually feed an incoming value of this PHI.
1610 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1612 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1613 // Since we do not allow splitting EH-block with BlockColors in
1614 // canSplitPredecessors(), we can simply assign predecessor's color to
1615 // the new block.
1616 if (!BlockColors.empty())
1617 // Grab a reference to the ColorVector to be inserted before getting the
1618 // reference to the vector we are copying because inserting the new
1619 // element in BlockColors might cause the map to be reallocated.
1620 SafetyInfo->copyColors(NewPred, PredBB);
1621 }
// Processed (split or skipped); drop it from the worklist.
1622 PredBBs.remove(PredBB);
1623 }
1624}
1625
1626/// When an instruction is found to only be used outside of the loop, this
1627/// function moves it to the exit blocks and patches up SSA form as needed.
1628/// This method is guaranteed to remove the original instruction from its
1629/// position, and may either delete it or move it to outside of the loop.
1630///
1631static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1632 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1634 bool Changed = false;
1635 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1636
1637 // Iterate over users to be ready for actual sinking. Replace users via
1638 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1639 SmallPtrSet<Instruction *, 8> VisitedUsers;
1640 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1641 auto *User = cast<Instruction>(*UI);
// Capture the Use and advance the iterator now: assigning to U below
// rewrites the use list while we walk it.
1642 Use &U = UI.getUse();
1643 ++UI;
1644
1645 if (VisitedUsers.count(User) || CurLoop->contains(User))
1646 continue;
1647
1648 if (!DT->isReachableFromEntry(User->getParent())) {
1649 U = PoisonValue::get(I.getType());
1650 Changed = true;
1651 continue;
1652 }
1653
1654 // The user must be a PHI node.
1655 PHINode *PN = cast<PHINode>(User);
1656
1657 // Surprisingly, instructions can be used outside of loops without any
1658 // exits. This can only happen in PHI nodes if the incoming block is
1659 // unreachable.
1660 BasicBlock *BB = PN->getIncomingBlock(U);
1661 if (!DT->isReachableFromEntry(BB)) {
1662 U = PoisonValue::get(I.getType());
1663 Changed = true;
1664 continue;
1665 }
1666
1667 VisitedUsers.insert(PN);
1668 if (isTriviallyReplaceablePHI(*PN, I))
1669 continue;
1670
1671 if (!canSplitPredecessors(PN, SafetyInfo))
1672 return Changed;
1673
1674 // Split predecessors of the PHI so that we can make users trivially
1675 // replaceable.
1676 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1677
1678 // Should rebuild the iterators, as they may be invalidated by
1679 // splitPredecessorsOfLoopExit().
1680 UI = I.user_begin();
1681 UE = I.user_end();
1682 }
1683
1684 if (VisitedUsers.empty())
1685 return Changed;
1686
1687 ORE->emit([&]() {
1688 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1689 << "sinking " << ore::NV("Inst", &I);
1690 });
1691 if (isa<LoadInst>(I))
1692 ++NumMovedLoads;
1693 else if (isa<CallInst>(I))
1694 ++NumMovedCalls;
1695 ++NumSunk;
1696
1697#ifndef NDEBUG
1699 CurLoop->getUniqueExitBlocks(ExitBlocks);
1700 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1701 ExitBlocks.end());
1702#endif
1703
1704 // Clones of this instruction. Don't create more than one per exit block!
1706
1707 // If this instruction is only used outside of the loop, then all users are
1708 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1709 // the instruction.
1710 // First check if I is worth sinking for all uses. Sink only when it is worth
1711 // across all uses.
// Snapshot the users first: the RAUW + erase below mutate I's use list, so
// iterating the live list directly would be unsafe.
1712 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1713 for (auto *UI : Users) {
1714 auto *User = cast<Instruction>(UI);
1715
1716 if (CurLoop->contains(User))
1717 continue;
1718
1719 PHINode *PN = cast<PHINode>(User);
1720 assert(ExitBlockSet.count(PN->getParent()) &&
1721 "The LCSSA PHI is not in an exit block!");
1722
1723 // The PHI must be trivially replaceable.
1725 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1726 // As we sink the instruction out of the BB, drop its debug location.
1727 New->dropLocation();
1728 PN->replaceAllUsesWith(New);
1729 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1730 Changed = true;
1731 }
1732 return Changed;
1733}
1734
1735/// When an instruction is found to only use loop invariant operands that
1736/// is safe to hoist, this instruction is called to do the dirty work.
1737///
1738static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1739 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1742 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1743 << I << "\n");
1744 ORE->emit([&]() {
1745 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1746 << ore::NV("Inst", &I);
1747 });
1748
1749 // Metadata can be dependent on conditions we are hoisting above.
1750 // Conservatively strip all metadata on the instruction unless we were
1751 // guaranteed to execute I if we entered the loop, in which case the metadata
1752 // is valid in the loop preheader.
1753 // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1754 // then moving to the preheader means we should strip attributes on the call
1755 // that can cause UB since we may be hoisting above conditions that allowed
1756 // inferring those attributes. They may not be valid at the preheader.
1757 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1758 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1759 // time in isGuaranteedToExecute if we don't actually have anything to
1760 // drop. It is a compile time optimization, not required for correctness.
1761 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1762 I.dropUBImplyingAttrsAndMetadata();
1763
1764 if (isa<PHINode>(I))
1765 // Move the new node to the end of the phi list in the destination block.
1766 moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1767 else
1768 // Move the new node to the destination block, before its terminator.
1769 moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1770 MSSAU, SE);
1771
1772 I.updateLocationAfterHoist();
1773
1774 if (isa<LoadInst>(I))
1775 ++NumMovedLoads;
1776 else if (isa<CallInst>(I))
1777 ++NumMovedCalls;
1778 ++NumHoisted;
1779}
1780
1781/// Only sink or hoist an instruction if it is not a trapping instruction,
1782/// or if the instruction is known not to trap when moved to the preheader.
1783/// or if it is a trapping instruction and is guaranteed to execute.
1785 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1786 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1787 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1788 AssumptionCache *AC, bool AllowSpeculation) {
1789 if (AllowSpeculation &&
1790 isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1791 return true;
1792
1793 bool GuaranteedToExecute =
1794 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1795
1796 if (!GuaranteedToExecute) {
1797 auto *LI = dyn_cast<LoadInst>(&Inst);
1798 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1799 ORE->emit([&]() {
1801 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1802 << "failed to hoist load with loop-invariant address "
1803 "because load is conditionally executed";
1804 });
1805 }
1806
1807 return GuaranteedToExecute;
1808}
1809
1810namespace {
// Helper for scalar promotion: drives LoadAndStorePromoter to rewrite the
// in-loop loads/stores of one promoted memory location into SSA form, and
// materializes the final write-back stores in the loop exit blocks when
// CanInsertStoresInExitBlocks permits it.
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  // NOTE(review): one member declaration was lost by the doc extraction here
  // (rendered line 1814) -- presumably the per-exit-block store insertion
  // points read as LoopInsertPts[i] below; confirm against upstream LICM.cpp.
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  MemorySSAUpdater &MSSAU;
  LoopInfo &LI;
  DebugLoc DL;           // Merged debug location for the inserted stores.
  Align Alignment;       // Alignment to place on the inserted stores.
  bool UnorderedAtomic;  // Whether the promoted accesses were unordered atomic.
  AAMDNodes AATags;      // Merged AA metadata for the inserted stores.
  ICFLoopSafetyInfo &SafetyInfo;
  bool CanInsertStoresInExitBlocks;
  // NOTE(review): another member declaration was elided here (rendered line
  // 1825); the constructor initializes Uses(Insts), so this is the Uses
  // member referenced by mergeDIAssignID below -- confirm against upstream.

  // We're about to add a use of V in a loop exit block.  Insert an LCSSA phi
  // (if legal) if doing so would add an out-of-loop use to an instruction
  // defined in-loop.
  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
      return V;

    Instruction *I = cast<Instruction>(V);
    // We need to create an LCSSA PHI node for the incoming value and
    // store that.
    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                  I->getName() + ".lcssa");
    PN->insertBefore(BB->begin());
    // One incoming value per predecessor, all feeding the same in-loop def.
    for (BasicBlock *Pred : PredCache.get(BB))
      PN->addIncoming(I, Pred);
    return PN;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               // NOTE(review): the parameter lines rendered as 1847-1849
               // (LEB, LIP, MSSAIP, PIC -- matching the reference members
               // initialized below) were dropped by extraction.
               MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
               Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
               ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
        LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
        LI(li), DL(std::move(dl)), Alignment(Alignment),
        UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo),
        CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}

  // Emit one store per loop exit block, writing back the value that is
  // live-out on that exit, and thread each new store into MemorySSA.
  void insertStoresInLoopExitBlocks() {
    // Insert stores after in the loop exit blocks.  Each exit block gets a
    // store of the live-out values that feed them.  Since we've already told
    // the SSA updater about the defs in the loop and the preheader
    // definition, it is all set and we can start using it.
    DIAssignID *NewID = nullptr;
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      // Both the stored value and the pointer may need LCSSA phis if they
      // are defined inside the loop.
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      BasicBlock::iterator InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(Alignment);
      NewSI->setDebugLoc(DL);
      // Attach DIAssignID metadata to the new store, generating it on the
      // first loop iteration.
      if (i == 0) {
        // NewSI will have its DIAssignID set here if there are any stores in
        // Uses with a DIAssignID attachment.  This merged ID will then be
        // attached to the other inserted stores (in the branch below).
        NewSI->mergeDIAssignID(Uses);
        NewID = cast_or_null<DIAssignID>(
            NewSI->getMetadata(LLVMContext::MD_DIAssignID));
      } else {
        // Attach the DIAssignID (or nullptr) merged from Uses in the branch
        // above.
        NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
      }

      if (AATags)
        NewSI->setAAMetadata(AATags);

      // First store in a block starts a fresh access list position;
      // subsequent ones chain after the access we inserted last time.
      MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
      MemoryAccess *NewMemAcc;
      if (!MSSAInsertPoint) {
        NewMemAcc = MSSAU.createMemoryAccessInBB(
            NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
      } else {
        NewMemAcc =
            MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
      }
      MSSAInsertPts[i] = NewMemAcc;
      MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
      // FIXME: true for safety, false may still be correct.
    }
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    if (CanInsertStoresInExitBlocks)
      insertStoresInLoopExitBlocks();
  }

  // Keep the safety info and MemorySSA in sync as the promoter deletes the
  // original in-loop accesses.
  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    MSSAU.removeMemoryAccess(I);
  }

  // Stores are deleted only when we were allowed to re-materialize them in
  // the exit blocks; loads are always rewritten away.
  bool shouldDelete(Instruction *I) const override {
    if (isa<StoreInst>(I))
      return CanInsertStoresInExitBlocks;
    return true;
  }
};
1926
1927bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1928 DominatorTree *DT) {
1929 // We can perform the captured-before check against any instruction in the
1930 // loop header, as the loop header is reachable from any instruction inside
1931 // the loop.
1932 // TODO: ReturnCaptures=true shouldn't be necessary here.
1933 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1934 /* StoreCaptures */ true,
1935 L->getHeader()->getTerminator(), DT);
1936}
1937
1938/// Return true if we can prove that a caller cannot inspect the object if an
1939/// unwind occurs inside the loop.
1940bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1941 DominatorTree *DT) {
1942 bool RequiresNoCaptureBeforeUnwind;
1943 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1944 return false;
1945
1946 return !RequiresNoCaptureBeforeUnwind ||
1947 isNotCapturedBeforeOrInLoop(Object, L, DT);
1948}
1949
// Return true if Object can be treated as thread-local for the purposes of
// scalar promotion (so inserting stores cannot introduce a data race).
// NOTE(review): the doc extraction dropped a trailing parameter line
// (rendered line 1951, presumably `TargetTransformInfo *TTI)` or similar)
// and the final disjunct of the return expression (rendered line 1956);
// confirm both against upstream LICM.cpp.
bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
  // The object must be function-local to start with, and then not captured
  // before/in the loop.
  return (isIdentifiedFunctionLocal(Object) &&
          isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
}
1958
1959} // namespace
1960
/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop.  We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
// NOTE(review): the doc extraction dropped the declaration line of this
// function (rendered line 1966) and several parameter lines (rendered
// 1968-1971) -- the body below uses ExitBlocks, InsertPts, MSSAInsertPts,
// PIC, LI, DT and AC, which presumably came from those lines; confirm
// against upstream LICM.cpp.
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
    MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
    bool HasReadsOutsideSet) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  LLVM_DEBUG({
    dbgs() << "Trying to promote set of must-aliased pointers:\n";
    for (Value *Ptr : PointerMustAliases)
      dbgs() << "  " << *Ptr << "\n";
  });
  ++NumPromotionCandidates;

  // All pointers in the set must-alias, so any representative will do.
  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional.  For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop.  In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop). We
  // can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  // requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  // blocks is reached, the original dynamic path would have taken us through
  // the store, so inserting a store into the exit block is safe. Note that this
  // is different from the store being guaranteed to execute. For instance,
  // if an exception is thrown on the first iteration of the loop, the original
  // store is never executed, but the exit blocks are not executed either.

  bool DereferenceableInPH = false;
  bool StoreIsGuanteedToExecute = false;
  bool FoundLoadToPromote = false;
  // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
  enum {
    StoreSafe,
    StoreUnsafe,
    StoreSafetyUnknown,
  } StoreSafety = StoreSafetyUnknown;

  // NOTE(review): a local declaration was elided here by the doc extraction
  // (rendered line 2038) -- used below as LoopUses (the collected in-loop
  // loads/stores); confirm against upstream.

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  Align Alignment;
  // Keep track of which types of access we see
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getDataLayout();

  // If there are reads outside the promoted set, then promoting stores is
  // definitely not safe.
  if (HasReadsOutsideSet)
    StoreSafety = StoreUnsafe;

  if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge.  We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = getUnderlyingObject(SomePtr);
    if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
      StoreSafety = StoreUnsafe;
  }

  // Check that all accesses to pointers in the alias set use the same type.
  // We cannot (yet) promote a memory location that is loaded and stored in
  // different sizes.  While we are at it, collect alignment and AA info.
  Type *AccessTy = nullptr;
  for (Value *ASIV : PointerMustAliases) {
    for (Use &U : ASIV->uses()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U.getUser());
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is an non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        // Only simple / unordered-atomic loads are promotable.
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();
        FoundLoadToPromote = true;

        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location.  Proving it guaranteed
        // to execute does as well.  Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          // NOTE(review): the doc extraction dropped a line here (rendered
          // 2094) opening the guarded call -- presumably
          // "if (isSafeToExecuteUnconditionally(" given the arguments below.
                  *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
                  Preheader->getTerminator(), AC, AllowSpeculation)) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are satisfied.
        // We may want to check if a store is guaranteed to execute even if we
        // already know that promotion is safe, since it may have higher
        // alignment than any other guaranteed stores, in which case we can
        // raise the alignment on the promoted store.
        Align InstAlignment = Store->getAlign();
        bool GuaranteedToExecute =
            SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
        StoreIsGuanteedToExecute |= GuaranteedToExecute;
        if (GuaranteedToExecute) {
          DereferenceableInPH = true;
          if (StoreSafety == StoreSafetyUnknown)
            StoreSafety = StoreSafe;
          Alignment = std::max(Alignment, InstAlignment);
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (StoreSafety == StoreSafetyUnknown &&
            llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
              return DT->dominates(Store->getParent(), Exit);
            }))
          StoreSafety = StoreSafe;

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
        }
      } else
        continue; // Not a load or store.

      // All promoted accesses must load/store the same type.
      if (!AccessTy)
        AccessTy = getLoadStoreType(UI);
      else if (AccessTy != getLoadStoreType(UI))
        return false;

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        AATags = UI->getAAMetadata();
      } else if (AATags) {
        AATags = AATags.merge(UI->getAAMetadata());
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail.  We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result.  We can't downgrade since that
  // would violate memory model.  Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it.  We're only guaranteed to be able to lower naturally aligned
  // atomics.
  if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH) {
    LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
    return false;
  }

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is writable and thread-local. If it is, then we
  // can insert stores along paths which originally didn't have them without
  // violating the memory model.
  if (StoreSafety == StoreSafetyUnknown) {
    Value *Object = getUnderlyingObject(SomePtr);
    bool ExplicitlyDereferenceableOnly;
    if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
        (!ExplicitlyDereferenceableOnly ||
         isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
        isThreadLocalObject(Object, CurLoop, DT, TTI))
      StoreSafety = StoreSafe;
  }

  // If we've still failed to prove we can sink the store, hoist the load
  // only, if possible.
  if (StoreSafety != StoreSafe && !FoundLoadToPromote)
    // If we cannot hoist the load either, give up.
    return false;

  // Lets do the promotion!
  if (StoreSafety == StoreSafe) {
    LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
                      << '\n');
    ++NumLoadStorePromoted;
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
                      << '\n');
    ++NumLoadPromoted;
  }

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });

  // Look at all the loop uses, and try to merge their locations.
  std::vector<DILocation *> LoopUsesLocs;
  for (auto *U : LoopUses)
    LoopUsesLocs.push_back(U->getDebugLoc().get());
  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));

  // We use the SSAUpdater interface to insert phi nodes as required.
  // NOTE(review): a declaration was elided here by the doc extraction
  // (rendered line 2229) -- the phi collection vector (NewPHIs) handed to
  // the SSAUpdater below; confirm against upstream.
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
                        MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
                        SawUnorderedAtomic, AATags, *SafetyInfo,
                        StoreSafety == StoreSafe);

  // Set up the preheader to have a definition of the value.  It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = nullptr;
  if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
    PreheaderLoad =
        new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
                     Preheader->getTerminator()->getIterator());
    if (SawUnorderedAtomic)
      PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
    PreheaderLoad->setAlignment(Alignment);
    PreheaderLoad->setDebugLoc(DebugLoc());
    if (AATags)
      PreheaderLoad->setAAMetadata(AATags);

    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
    MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
    SSA.AddAvailableValue(Preheader, PreheaderLoad);
  } else {
    // No load needed: every path that can observe the value stores first.
    SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
  }

  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad && PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);

  return true;
}
2273
2274static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2275 function_ref<void(Instruction *)> Fn) {
2276 for (const BasicBlock *BB : L->blocks())
2277 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2278 for (const auto &Access : *Accesses)
2279 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2280 Fn(MUD->getMemoryInst());
2281}
2282
// The bool indicates whether there might be reads outside the set, in which
// case only loads may be promoted.
// NOTE(review): the doc extraction dropped this function's declaration
// (rendered lines 2285-2286) and the declarations of the locals used below
// as Sets (rendered line 2308), the erase_if/remove opener binding Pair
// (rendered line 2321), and Result (rendered line 2337); confirm all against
// upstream LICM.cpp.
  BatchAAResults BatchAA(*AA);
  AliasSetTracker AST(BatchAA);

  // An access is a promotion candidate iff its address is loop-invariant.
  auto IsPotentiallyPromotable = [L](const Instruction *I) {
    if (const auto *SI = dyn_cast<StoreInst>(I))
      return L->isLoopInvariant(SI->getPointerOperand());
    if (const auto *LI = dyn_cast<LoadInst>(I))
      return L->isLoopInvariant(LI->getPointerOperand());
    return false;
  };

  // Populate AST with potentially promotable accesses.
  SmallPtrSet<Value *, 16> AttemptingPromotion;
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (IsPotentiallyPromotable(I)) {
      AttemptingPromotion.insert(I);
      AST.add(I);
    }
  });

  // We're only interested in must-alias sets that contain a mod.
  for (AliasSet &AS : AST)
    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
      Sets.push_back({&AS, false});

  if (Sets.empty())
    return {}; // Nothing to promote...

  // Discard any sets for which there is an aliasing non-promotable access.
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (AttemptingPromotion.contains(I))
      return;

      ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
      // Cannot promote if there are writes outside the set.
      if (isModSet(MR))
        return true;
      if (isRefSet(MR)) {
        // Remember reads outside the set.
        Pair.setInt(true);
        // If this is a mod-only set and there are reads outside the set,
        // we will not be able to promote, so bail out early.
        return !Pair.getPointer()->isRef();
      }
      return false;
    });
  });

  // Materialize the surviving sets as pointer collections for the caller.
  for (auto [Set, HasReadsOutsideSet] : Sets) {
    SmallSetVector<Value *, 8> PointerMustAliases;
    for (const auto &MemLoc : *Set)
      PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
    Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
  }

  return Result;
}
2347
// NOTE(review): the first line of this function's signature (rendered line
// 2348) was dropped by the doc extraction -- presumably
// "static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,"
// given the uses of MSSA and MU below; confirm against upstream LICM.cpp.
// Returns true if the memory read by MU may be modified inside the loop in a
// way that makes hoisting (or sinking, when Flags.getIsSink()) unsafe.
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup) {
  // For hoisting, use the walker to determine safety
  if (!Flags.getIsSink()) {
    // If hoisting an invariant group, we only need to check that there
    // is no store to the loaded pointer between the start of the loop,
    // and the load (since all values must be the same).

    // This can be checked in two conditions:
    // 1) if the memoryaccess is outside the loop
    // 2) the earliest access is at the loop header,
    // if the memory loaded is the phi node

    BatchAAResults BAA(MSSA->getAA());
    MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
    // Invalidated only when the clobber is a real access inside the loop
    // (and, for invariant groups, not merely the header MemoryPhi).
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock()) &&
           !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  // for (i ... )
  //   load a[i] ( Use (LoE)
  //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
  //   i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However sinking the load outside the loop, below the store is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.tooManyMemoryAccesses())
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
      return true;
  // When sinking, the source block may not be part of the loop so check it.
  if (!CurLoop->contains(&I))
    return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);

  return false;
}
2398
2400 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2401 for (const auto &MA : *Accesses)
2402 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2403 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2404 return true;
2405 return false;
2406}
2407
/// Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A <
/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
/// minimun can be computed outside of loop, and X is not a loop-invariant.
// NOTE(review): several single lines inside this function were dropped by the
// doc extraction (rendered lines 2429, 2433, 2438, 2454, 2471, 2473); each
// gap is flagged below -- confirm the missing statements against upstream.
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                        MemorySSAUpdater &MSSAU) {
  bool Inverse = false;
  using namespace PatternMatch;
  Value *Cond1, *Cond2;
  // A logical-or of comparisons is handled by inverting to the and-form.
  if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
    Inverse = true;
  } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
    // Do nothing
  } else
    return false;

  // Match C as "icmp P LHS, RHS" with RHS loop-invariant and LHS variant,
  // canonicalizing operand order (and predicate) as needed.
  auto MatchICmpAgainstInvariant = [&](Value *C, ICmpInst::Predicate &P,
                                       Value *&LHS, Value *&RHS) {
    if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
      return false;
    if (!LHS->getType()->isIntegerTy())
      return false;
    // NOTE(review): a guard condition was elided here (rendered line 2429)
    // ahead of this early return -- presumably a relational-predicate check.
      return false;
    if (L.isLoopInvariant(LHS)) {
      std::swap(LHS, RHS);
      // NOTE(review): elided line (rendered 2433) -- presumably swapping the
      // predicate to match the operand swap.
    }
    if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
      return false;
    if (Inverse)
      // NOTE(review): elided line (rendered 2438) -- presumably inverting
      // the predicate for the logical-or case.
    return true;
  };
  ICmpInst::Predicate P1, P2;
  Value *LHS1, *LHS2, *RHS1, *RHS2;
  if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
      !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
    return false;
  // Both compares must test the same variant value with the same predicate.
  if (P1 != P2 || LHS1 != LHS2)
    return false;

  // Everything is fine, we can do the transform.
  bool UseMin = ICmpInst::isLT(P1) || ICmpInst::isLE(P1);
  assert(
      (UseMin || ICmpInst::isGT(P1) || ICmpInst::isGE(P1)) &&
      "Relational predicate is either less (or equal) or greater (or equal)!");
  // NOTE(review): elided line (rendered 2454) -- presumably the declaration
  // of `id` selecting between the signed/unsigned min/max intrinsics below.
             ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
             : (UseMin ? Intrinsic::umin : Intrinsic::umax);
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  // We are about to create a new guaranteed use for RHS2 which might not exist
  // before (if it was a non-taken input of logical and/or instruction). If it
  // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
  // introduced, so they don't need this.
  if (isa<SelectInst>(I))
    RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
  Value *NewRHS = Builder.CreateBinaryIntrinsic(
      id, RHS1, RHS2, nullptr, StringRef("invariant.") +
                                   (ICmpInst::isSigned(P1) ? "s" : "u") +
                                   (UseMin ? "min" : "max"));
  Builder.SetInsertPoint(&I);
  // NOTE(review): elided line (rendered 2471) -- presumably initializing the
  // predicate P used by the CreateICmp call below.
  if (Inverse)
    // NOTE(review): elided line (rendered 2473) -- presumably inverting P
    // back for the logical-or form.
  Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
  NewCond->takeName(&I);
  I.replaceAllUsesWith(NewCond);
  // The original compare and both one-use operand compares are now dead.
  eraseInstruction(I, SafetyInfo, MSSAU);
  eraseInstruction(*cast<Instruction>(Cond1), SafetyInfo, MSSAU);
  eraseInstruction(*cast<Instruction>(Cond2), SafetyInfo, MSSAU);
  return true;
}
2482
2483/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2484/// this allows hoisting the inner GEP.
2485static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2487 DominatorTree *DT) {
2488 auto *GEP = dyn_cast<GetElementPtrInst>(&I);
2489 if (!GEP)
2490 return false;
2491
2492 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2493 if (!Src || !Src->hasOneUse() || !L.contains(Src))
2494 return false;
2495
2496 Value *SrcPtr = Src->getPointerOperand();
2497 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2498 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2499 return false;
2500
2501 // This can only happen if !AllowSpeculation, otherwise this would already be
2502 // handled.
2503 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2504 // The flag exists to prevent metadata dropping, which is not relevant here.
2505 if (all_of(Src->indices(), LoopInvariant))
2506 return false;
2507
2508 // The swapped GEPs are inbounds if both original GEPs are inbounds
2509 // and the sign of the offsets is the same. For simplicity, only
2510 // handle both offsets being non-negative.
2511 const DataLayout &DL = GEP->getDataLayout();
2512 auto NonNegative = [&](Value *V) {
2513 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2514 };
2515 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2516 all_of(Src->indices(), NonNegative) &&
2517 all_of(GEP->indices(), NonNegative);
2518
2519 BasicBlock *Preheader = L.getLoopPreheader();
2520 IRBuilder<> Builder(Preheader->getTerminator());
2521 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2522 SmallVector<Value *>(GEP->indices()),
2523 "invariant.gep", IsInBounds);
2524 Builder.SetInsertPoint(GEP);
2525 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2526 SmallVector<Value *>(Src->indices()), "gep",
2527 IsInBounds);
2528 GEP->replaceAllUsesWith(NewGEP);
2529 eraseInstruction(*GEP, SafetyInfo, MSSAU);
2530 eraseInstruction(*Src, SafetyInfo, MSSAU);
2531 return true;
2532}
2533
2534/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2535/// C1 and C2 are loop invariants and LV is a loop-variant.
2536static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2537 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2538 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2539 AssumptionCache *AC, DominatorTree *DT) {
2540 assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
2541 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2542 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2543
2544 // Try to represent VariantLHS as sum of invariant and variant operands.
2545 using namespace PatternMatch;
2546 Value *VariantOp, *InvariantOp;
2547 if (!match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2548 return false;
2549
2550 // LHS itself is a loop-variant, try to represent it in the form:
2551 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2552 if (L.isLoopInvariant(VariantOp))
2553 std::swap(VariantOp, InvariantOp);
2554 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2555 return false;
2556
2557 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2558 // freely move values from left side of inequality to right side (just as in
2559 // normal linear arithmetics). Overflows make things much more complicated, so
2560 // we want to avoid this.
2561 auto &DL = L.getHeader()->getDataLayout();
2562 bool ProvedNoOverflowAfterReassociate =
2563 computeOverflowForSignedSub(InvariantRHS, InvariantOp,
2564 SimplifyQuery(DL, DT, AC, &ICmp)) ==
2566 if (!ProvedNoOverflowAfterReassociate)
2567 return false;
2568 auto *Preheader = L.getLoopPreheader();
2569 assert(Preheader && "Loop is not in simplify form?");
2570 IRBuilder<> Builder(Preheader->getTerminator());
2571 Value *NewCmpOp = Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
2572 /*HasNUW*/ false, /*HasNSW*/ true);
2573 ICmp.setPredicate(Pred);
2574 ICmp.setOperand(0, VariantOp);
2575 ICmp.setOperand(1, NewCmpOp);
2576 eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
2577 return true;
2578}
2579
2580/// Try to reassociate and hoist the following two patterns:
2581/// LV - C1 < C2 --> LV < C1 + C2,
2582/// C1 - LV < C2 --> LV > C1 - C2.
2583static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2584 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2585 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2586 AssumptionCache *AC, DominatorTree *DT) {
2587 assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
2588 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2589 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2590
2591 // Try to represent VariantLHS as sum of invariant and variant operands.
2592 using namespace PatternMatch;
2593 Value *VariantOp, *InvariantOp;
2594 if (!match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2595 return false;
2596
2597 bool VariantSubtracted = false;
2598 // LHS itself is a loop-variant, try to represent it in the form:
2599 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2600 // the variant operand goes with minus, we use a slightly different scheme.
2601 if (L.isLoopInvariant(VariantOp)) {
2602 std::swap(VariantOp, InvariantOp);
2603 VariantSubtracted = true;
2604 Pred = ICmpInst::getSwappedPredicate(Pred);
2605 }
2606 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2607 return false;
2608
2609 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2610 // freely move values from left side of inequality to right side (just as in
2611 // normal linear arithmetics). Overflows make things much more complicated, so
2612 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2613 // "C1 - C2" does not overflow.
2614 auto &DL = L.getHeader()->getDataLayout();
2615 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2616 if (VariantSubtracted) {
2617 // C1 - LV < C2 --> LV > C1 - C2
2618 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
2620 return false;
2621 } else {
2622 // LV - C1 < C2 --> LV < C1 + C2
2623 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
2625 return false;
2626 }
2627 auto *Preheader = L.getLoopPreheader();
2628 assert(Preheader && "Loop is not in simplify form?");
2629 IRBuilder<> Builder(Preheader->getTerminator());
2630 Value *NewCmpOp =
2631 VariantSubtracted
2632 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
2633 /*HasNUW*/ false, /*HasNSW*/ true)
2634 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
2635 /*HasNUW*/ false, /*HasNSW*/ true);
2636 ICmp.setPredicate(Pred);
2637 ICmp.setOperand(0, VariantOp);
2638 ICmp.setOperand(1, NewCmpOp);
2639 eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
2640 return true;
2641}
2642
2643/// Reassociate and hoist add/sub expressions.
2644static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2646 DominatorTree *DT) {
2647 using namespace PatternMatch;
2649 Value *LHS, *RHS;
2650 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
2651 return false;
2652
2653 // TODO: Support unsigned predicates?
2654 if (!ICmpInst::isSigned(Pred))
2655 return false;
2656
2657 // Put variant operand to LHS position.
2658 if (L.isLoopInvariant(LHS)) {
2659 std::swap(LHS, RHS);
2660 Pred = ICmpInst::getSwappedPredicate(Pred);
2661 }
2662 // We want to delete the initial operation after reassociation, so only do it
2663 // if it has no other uses.
2664 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
2665 return false;
2666
2667 // TODO: We could go with smarter context, taking common dominator of all I's
2668 // users instead of I itself.
2669 if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2670 return true;
2671
2672 if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2673 return true;
2674
2675 return false;
2676}
2677
2678static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2679 unsigned FPOpcode) {
2680 if (I->getOpcode() == IntOpcode)
2681 return true;
2682 if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2683 I->hasNoSignedZeros())
2684 return true;
2685 return false;
2686}
2687
2688/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2689/// A1, A2, ... and C are loop invariants into expressions like
2690/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2691/// invariant expressions. This functions returns true only if any hoisting has
2692/// actually occured.
2694 ICFLoopSafetyInfo &SafetyInfo,
2696 DominatorTree *DT) {
2697 if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
2698 return false;
2699 Value *VariantOp = I.getOperand(0);
2700 Value *InvariantOp = I.getOperand(1);
2701 if (L.isLoopInvariant(VariantOp))
2702 std::swap(VariantOp, InvariantOp);
2703 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2704 return false;
2705 Value *Factor = InvariantOp;
2706
2707 // First, we need to make sure we should do the transformation.
2708 SmallVector<Use *> Changes;
2711 if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2712 Worklist.push_back(VariantBinOp);
2713 while (!Worklist.empty()) {
2714 BinaryOperator *BO = Worklist.pop_back_val();
2715 if (!BO->hasOneUse())
2716 return false;
2717 if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
2718 isa<BinaryOperator>(BO->getOperand(0)) &&
2719 isa<BinaryOperator>(BO->getOperand(1))) {
2720 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
2721 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
2722 Adds.push_back(BO);
2723 continue;
2724 }
2725 if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
2726 L.isLoopInvariant(BO))
2727 return false;
2728 Use &U0 = BO->getOperandUse(0);
2729 Use &U1 = BO->getOperandUse(1);
2730 if (L.isLoopInvariant(U0))
2731 Changes.push_back(&U0);
2732 else if (L.isLoopInvariant(U1))
2733 Changes.push_back(&U1);
2734 else
2735 return false;
2736 unsigned Limit = I.getType()->isIntOrIntVectorTy()
2739 if (Changes.size() > Limit)
2740 return false;
2741 }
2742 if (Changes.empty())
2743 return false;
2744
2745 // Drop the poison flags for any adds we looked through.
2746 if (I.getType()->isIntOrIntVectorTy()) {
2747 for (auto *Add : Adds)
2748 Add->dropPoisonGeneratingFlags();
2749 }
2750
2751 // We know we should do it so let's do the transformation.
2752 auto *Preheader = L.getLoopPreheader();
2753 assert(Preheader && "Loop is not in simplify form?");
2754 IRBuilder<> Builder(Preheader->getTerminator());
2755 for (auto *U : Changes) {
2756 assert(L.isLoopInvariant(U->get()));
2757 auto *Ins = cast<BinaryOperator>(U->getUser());
2758 Value *Mul;
2759 if (I.getType()->isIntOrIntVectorTy()) {
2760 Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
2761 // Drop the poison flags on the original multiply.
2762 Ins->dropPoisonGeneratingFlags();
2763 } else
2764 Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
2765
2766 // Rewrite the reassociable instruction.
2767 unsigned OpIdx = U->getOperandNo();
2768 auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
2769 auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
2770 auto *NewBO =
2771 BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
2772 Ins->getName() + ".reass", Ins->getIterator());
2773 NewBO->copyIRFlags(Ins);
2774 if (VariantOp == Ins)
2775 VariantOp = NewBO;
2776 Ins->replaceAllUsesWith(NewBO);
2777 eraseInstruction(*Ins, SafetyInfo, MSSAU);
2778 }
2779
2780 I.replaceAllUsesWith(VariantOp);
2781 eraseInstruction(I, SafetyInfo, MSSAU);
2782 return true;
2783}
2784
2785/// Reassociate associative binary expressions of the form
2786///
2787/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
2788///
2789/// where op is an associative binary op, LV is a loop variant, and C1 and C2
2790/// are loop invariants that we want to hoist.
2791///
2792/// TODO: This can be extended to more cases such as
2793/// 2. "C1 op (C2 op LV)" ==> "(C1 op C2) op LV"
2794/// 3. "(C1 op LV) op C2" ==> "LV op (C1 op C2)" if op is commutative
2795/// 4. "C1 op (LV op C2)" ==> "(C1 op C2) op LV" if op is commutative
2797 ICFLoopSafetyInfo &SafetyInfo,
2799 DominatorTree *DT) {
2800 auto *BO = dyn_cast<BinaryOperator>(&I);
2801 if (!BO || !BO->isAssociative())
2802 return false;
2803
2804 // Only fold ADDs for now.
2805 Instruction::BinaryOps Opcode = BO->getOpcode();
2806 if (Opcode != Instruction::Add)
2807 return false;
2808
2809 auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(0));
2810 if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
2811 BO0->hasNUsesOrMore(3))
2812 return false;
2813
2814 // Transform: "(LV op C1) op C2" ==> "LV op (C1 op C2)"
2815 Value *LV = BO0->getOperand(0);
2816 Value *C1 = BO0->getOperand(1);
2817 Value *C2 = BO->getOperand(1);
2818
2819 if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
2820 return false;
2821
2822 auto *Preheader = L.getLoopPreheader();
2823 assert(Preheader && "Loop is not in simplify form?");
2824
2825 auto *Inv = BinaryOperator::Create(Opcode, C1, C2, "invariant.op",
2826 Preheader->getTerminator()->getIterator());
2827 auto *NewBO = BinaryOperator::Create(
2828 Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());
2829
2830 // Copy NUW for ADDs if both instructions have it.
2831 if (Opcode == Instruction::Add && BO->hasNoUnsignedWrap() &&
2832 BO0->hasNoUnsignedWrap()) {
2833 Inv->setHasNoUnsignedWrap(true);
2834 NewBO->setHasNoUnsignedWrap(true);
2835 }
2836
2837 BO->replaceAllUsesWith(NewBO);
2838 eraseInstruction(*BO, SafetyInfo, MSSAU);
2839
2840 // (LV op C1) might not be erased if it has more uses than the one we just
2841 // replaced.
2842 if (BO0->use_empty())
2843 eraseInstruction(*BO0, SafetyInfo, MSSAU);
2844
2845 return true;
2846}
2847
2849 ICFLoopSafetyInfo &SafetyInfo,
2851 DominatorTree *DT) {
2852 // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
2853 // into (x < min(INV1, INV2)), and hoisting the invariant part of this
2854 // expression out of the loop.
2855 if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
2856 ++NumHoisted;
2857 ++NumMinMaxHoisted;
2858 return true;
2859 }
2860
2861 // Try to hoist GEPs by reassociation.
2862 if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
2863 ++NumHoisted;
2864 ++NumGEPsHoisted;
2865 return true;
2866 }
2867
2868 // Try to hoist add/sub's by reassociation.
2869 if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
2870 ++NumHoisted;
2871 ++NumAddSubHoisted;
2872 return true;
2873 }
2874
2875 bool IsInt = I.getType()->isIntOrIntVectorTy();
2876 if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2877 ++NumHoisted;
2878 if (IsInt)
2879 ++NumIntAssociationsHoisted;
2880 else
2881 ++NumFPAssociationsHoisted;
2882 return true;
2883 }
2884
2885 if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2886 ++NumHoisted;
2887 ++NumBOAssociationsHoisted;
2888 return true;
2889 }
2890
2891 return false;
2892}
2893
2894/// Little predicate that returns true if the specified basic block is in
2895/// a subloop of the current one, not the current one itself.
2896///
2897static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2898 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2899 return LI->getLoopFor(BB) != CurLoop;
2900}
unsigned const MachineRegisterInfo * MRI
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ NonNegative
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
Rewrite Partial Register Uses
#define DEBUG_TYPE
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool isReassociableOp(Instruction *I, unsigned IntOpcode, unsigned FPOpcode)
Definition: LICM.cpp:2678
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, TargetTransformInfo *TTI, bool &FoldableInLoop, bool LoopNestMode)
Return true if the only users of this instruction are outside of the loop.
Definition: LICM.cpp:1386
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if this allows hoisting the inner ...
Definition: LICM.cpp:2485
static cl::opt< bool > SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false), cl::desc("Force thread model single in LICM pass"))
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, LoopInfo *LI, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU)
Definition: LICM.cpp:1559
static cl::opt< unsigned > FPAssociationUpperLimit("licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop, const TargetTransformInfo *TTI)
Return true if the instruction is foldable in the loop.
Definition: LICM.cpp:1356
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A < min(INV_1, INV_2)),...
Definition: LICM.cpp:2411
static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE)
Definition: LICM.cpp:1509
static Instruction * cloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1428
static cl::opt< bool > ControlFlowHoisting("licm-control-flow-hoisting", cl::Hidden, cl::init(false), cl::desc("Enable control flow (and PHI) hoisting in LICM"))
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, Loop *CurLoop, Instruction &I, SinkAndHoistLICMFlags &Flags, bool InvariantGroup)
Definition: LICM.cpp:2348
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to turn things like "LV + C1 < C2" into "LV < C2 - C1".
Definition: LICM.cpp:2536
static MemoryAccess * getClobberingMemoryAccess(MemorySSA &MSSA, BatchAAResults &BAA, SinkAndHoistLICMFlags &Flags, MemoryUseOrDef *MA)
Definition: LICM.cpp:1148
static SmallVector< PointersAndHasReadsOutsideSet, 0 > collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)
Definition: LICM.cpp:2286
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
When an instruction is found to only use loop invariant operands that is safe to hoist,...
Definition: LICM.cpp:1738
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition: LICM.cpp:1542
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition: LICM.cpp:1631
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate and hoist add/sub expressions.
Definition: LICM.cpp:2644
static bool hoistMulAddAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where A1, A2,...
Definition: LICM.cpp:2693
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
cl::opt< unsigned > IntAssociationUpperLimit("licm-max-num-int-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition: LICM.cpp:2274
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition: LICM.cpp:1054
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1524
licm
Definition: LICM.cpp:378
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition: LICM.cpp:2897
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate and hoist the following two patterns: LV - C1 < C2 --> LV < C1 + C2,...
Definition: LICM.cpp:2583
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1502
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition: LICM.cpp:1784
static bool hoistArithmetics(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Aggregates various functions for hoisting computations out of loop.
Definition: LICM.cpp:2848
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition: LICM.cpp:1347
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition: LICM.cpp:214
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool hoistBOAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate associative binary expressions of the form.
Definition: LICM.cpp:2796
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition: LICM.cpp:2399
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Loop Invariant Code Motion
Memory SSA
Definition: MemorySSA.cpp:72
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
uint64_t IntrinsicInst * II
#define P(N)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
This file provides a priority worklist.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This pass exposes codegen information to IR-level passes.
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
void add(const MemoryLocation &Loc)
These methods are used to add different types of instructions to the alias sets.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
Definition: BasicBlock.cpp:662
iterator end()
Definition: BasicBlock.h:461
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:374
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:489
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:296
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition: BasicBlock.h:376
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
bool canSplitPredecessors() const
Definition: BasicBlock.cpp:545
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:850
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
bool isSigned() const
Definition: InstrTypes.h:1007
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:909
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:871
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
bool isNegative() const
Definition: Constants.h:201
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:161
Assignment ID.
static DILocation * getMergedLocations(ArrayRef< DILocation * > Locs)
Try to combine the vector of locations passed as input in a single one.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:109
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:448
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:317
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
Definition: MustExecute.h:132
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
Definition: MustExecute.cpp:99
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
Definition: MustExecute.cpp:75
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
Definition: MustExecute.cpp:79
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
Definition: MustExecute.cpp:93
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:922
Value * CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1606
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2543
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1871
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1349
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1332
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2359
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1366
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2674
void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
Definition: DebugInfo.cpp:936
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1727
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:824
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:169
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:381
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1642
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1713
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:319
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:296
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:329
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:360
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition: SSAUpdater.h:151
An instruction for reading from memory.
Definition: Instructions.h:174
void setAlignment(Align Align)
Definition: Instructions.h:213
Value * getPointerOperand()
Definition: Instructions.h:253
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:223
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:571
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
Definition: LoopIterator.h:180
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:598
bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition: LoopInfo.cpp:943
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition: MustExecute.h:60
void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
Definition: MustExecute.cpp:36
const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
Definition: MustExecute.cpp:32
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition: LoopInfo.cpp:67
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition: LoopInfo.cpp:61
BasicBlock * getBlock() const
Definition: MemorySSA.h:165
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:928
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1045
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:985
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:701
AliasAnalysis & getAA()
Definition: MemorySSA.h:799
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:759
MemorySSAWalker * getSkipSelfWalker()
Definition: MemorySSA.cpp:1603
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2173
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Definition: MemorySSA.cpp:1905
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:719
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:767
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
Definition: MemorySSA.cpp:2142
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:739
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:253
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:263
Represents read-only accesses to memory.
Definition: MemorySSA.h:313
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
PointerIntPair - This class implements a pair of a pointer and small integer.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB)
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:40
The main scalar evolution driver.
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:188
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
Flags controlling how much is checked when sinking or hoisting instructions.
Definition: LoopUtils.h:120
SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition: LICM.cpp:388
unsigned LicmMssaNoAccForPromotionCap
Definition: LoopUtils.h:139
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type size() const
Definition: SmallPtrSet.h:96
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:385
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:436
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
iterator begin() const
Definition: SmallPtrSet.h:456
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:442
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:503
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
bool empty() const
Definition: SmallVector.h:95
size_t size() const
Definition: SmallVector.h:92
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:587
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:951
void reserve(size_type N)
Definition: SmallVector.h:677
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
An instruction for storing to memory.
Definition: Instructions.h:290
void setAlignment(Align Align)
Definition: Instructions.h:333
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:344
static unsigned getPointerOperandIndex()
Definition: Instructions.h:379
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
const Use & getOperandUse(unsigned i) const
Definition: User.h:182
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
std::string getNameOrAsOperand() const
Definition: Value.cpp:445
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
bool use_empty() const
Definition: Value.h:344
user_iterator_impl< User > user_iterator
Definition: Value.h:390
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
@ Exit
Definition: COFF.h:827
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
pred_iterator pred_end(BasicBlock *BB)
Definition: CFG.h:114
SmallVector< DomTreeNode *, 16 > collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
Definition: LoopUtils.cpp:450
@ NeverOverflows
Never overflows.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if is legal to hoist or sink this instruction disregarding the possible introduction of ...
Definition: LICM.cpp:1162
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
Definition: SetOperations.h:58
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1678
void initializeLegacyLICMPassPass(PassRegistry &)
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition: Loads.cpp:201
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition: LCSSA.cpp:465
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
Pass * createLICMPass()
Definition: LICM.cpp:381
bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:876
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:400
pred_iterator pred_begin(BasicBlock *BB)
Definition: CFG.h:110
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
Definition: GuardUtils.cpp:18
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
bool isModOrRefSet(const ModRefInfo MRI)
Definition: ModRef.h:42
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
Definition: LoopUtils.cpp:141
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: ModRef.h:27
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:84
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
Definition: LoopUtils.cpp:348
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
@ Mul
Product of integers.
@ Add
Sum of integers.
void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
Definition: LoopUtils.cpp:1754
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition: Loads.cpp:221
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1849
PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2057
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:555
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
unsigned pred_size(const MachineBasicBlock *BB)
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< BasicBlock::iterator > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition: LICM.cpp:1966
cl::opt< unsigned > SetLicmMssaOptCap
bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition: LICM.cpp:623
bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
unsigned MssaOptCap
Definition: LICM.h:49
unsigned MssaNoAccForPromotionCap
Definition: LICM.h:50
bool AllowSpeculation
Definition: LICM.h:51
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1131
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1159
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69