LLVM 23.0.0git
LICM.cpp
Go to the documentation of this file.
1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
15// Hoisting operations out of loops is a canonicalization transform. It
16// enables and simplifies subsequent optimizations in the middle-end.
17// Rematerialization of hoisted instructions to reduce register pressure is the
18// responsibility of the back-end, which has more accurate information about
19// register pressure and also handles other optimizations than LICM that
20// increase live-ranges.
21//
22// This pass uses alias analysis for two purposes:
23//
24// 1. Moving loop invariant loads and calls out of loops. If we can determine
25// that a load or call inside of a loop never aliases anything stored to,
26// we can hoist it or sink it like any other instruction.
27// 2. Scalar Promotion of Memory - If there is a store instruction inside of
28// the loop, we try to move the store to happen AFTER the loop instead of
29// inside of the loop. This can only happen if a few conditions are true:
30// A. The pointer stored through is loop invariant
31// B. There are no stores or loads in the loop which _may_ alias the
32// pointer. There are no calls in the loop which mod/ref the pointer.
33// If these conditions are true, we can promote the loads and stores in the
34// loop of the pointer to use a temporary alloca'd variable. We then use
35// the SSAUpdater to construct the appropriate SSA form for the value.
36//
37//===----------------------------------------------------------------------===//
38
42#include "llvm/ADT/Statistic.h"
50#include "llvm/Analysis/Loads.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/Dominators.h"
69#include "llvm/IR/IRBuilder.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Metadata.h"
78#include "llvm/Support/Debug.h"
86#include <algorithm>
87#include <utility>
88using namespace llvm;
89
90namespace llvm {
91class LPMUpdater;
92} // namespace llvm
93
94#define DEBUG_TYPE "licm"
95
96STATISTIC(NumCreatedBlocks, "Number of blocks created");
97STATISTIC(NumClonedBranches, "Number of branches cloned");
98STATISTIC(NumSunk, "Number of instructions sunk out of loop");
99STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
100STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
101STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
102STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
103STATISTIC(NumLoadPromoted, "Number of load-only promotions");
104STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
105STATISTIC(NumMinMaxHoisted,
106 "Number of min/max expressions hoisted out of the loop");
107STATISTIC(NumGEPsHoisted,
108 "Number of geps reassociated and hoisted out of the loop");
109STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
110 "and hoisted out of the loop");
111STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
112 "reassociated and hoisted out of the loop");
113STATISTIC(NumIntAssociationsHoisted,
114 "Number of invariant int expressions "
115 "reassociated and hoisted out of the loop");
116STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
117 "reassociated and hoisted out of the loop");
118
119/// Memory promotion is enabled by default.
120static cl::opt<bool>
121 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
122 cl::desc("Disable memory promotion in LICM pass"));
123
125 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
126 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
127
128static cl::opt<bool>
129 SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
130 cl::desc("Force thread model single in LICM pass"));
131
133 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
134 cl::desc("Max num uses visited for identifying load "
135 "invariance in loop using invariant start (default = 8)"));
136
138 "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
139 cl::desc(
140 "Set upper limit for the number of transformations performed "
141 "during a single round of hoisting the reassociated expressions."));
142
144 "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
145 cl::desc(
146 "Set upper limit for the number of transformations performed "
147 "during a single round of hoisting the reassociated expressions."));
148
149// Experimental option to allow imprecision in LICM in pathological cases, in
150// exchange for faster compile. This is to be removed if MemorySSA starts to
151// address the same issue. LICM calls MemorySSAWalker's
152// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
153// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
154// which may not be precise, since optimizeUses is capped. The result is
155// correct, but we may not get as "far up" as possible to get which access is
156// clobbering the one queried.
158 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
159 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
160 "for faster compile. Caps the MemorySSA clobbering calls."));
161
162// Experimentally, memory promotion carries less importance than sinking and
163// hoisting. Limit when we do promotion when using MemorySSA, in order to save
164// compile time.
166 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
167 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
168 "effect. When MSSA in LICM is enabled, then this is the maximum "
169 "number of accesses allowed to be present in a loop in order to "
170 "enable memory promotion."));
171
172namespace llvm {
174} // end namespace llvm
175
176static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
177static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
178 const LoopSafetyInfo *SafetyInfo,
180 bool &FoldableInLoop, bool LoopNestMode);
181static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
182 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
185static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
186 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
189 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
190 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
191 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
192 AssumptionCache *AC, bool AllowSpeculation);
194 AAResults *AA, Loop *CurLoop,
195 SinkAndHoistLICMFlags &Flags);
196static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
197 Loop *CurLoop, Instruction &I,
199 bool InvariantGroup);
200static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
201 MemoryUse &MU);
202/// Aggregates various functions for hoisting computations out of loop.
203static bool hoistArithmetics(Instruction &I, Loop &L,
204 ICFLoopSafetyInfo &SafetyInfo,
206 DominatorTree *DT);
208 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
209 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);
210
211static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
212 MemorySSAUpdater &MSSAU);
213
215 ICFLoopSafetyInfo &SafetyInfo,
217
218static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
219 function_ref<void(Instruction *)> Fn);
221 std::pair<SmallSetVector<Value *, 8>, bool>;
224
225namespace {
// Shared implementation of the LICM transform, driven by both the legacy and
// new pass managers. Holds the MemorySSA tuning caps and the speculation
// policy; runOnLoop is the single entry point.
// NOTE(review): this listing elides original lines 228-229, i.e. the middle
// parameters of the runOnLoop declaration — confirm against the full source.
226struct LoopInvariantCodeMotion {
 227 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
 230 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
 231
 // Capture the caps controlling MemorySSA walker precision / promotion and
 // whether hoisting may speculate; stored unchanged for use by runOnLoop.
 232 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
 233 unsigned LicmMssaNoAccForPromotionCap,
 234 bool LicmAllowSpeculation)
 235 : LicmMssaOptCap(LicmMssaOptCap),
 236 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
 237 LicmAllowSpeculation(LicmAllowSpeculation) {}
 238
 239private:
 // Cap on MemorySSA getClobberingMemoryAccess calls (precision/compile-time
 // trade-off; see the comment on the licm-mssa-optimization-cap option).
 240 unsigned LicmMssaOptCap;
 // Max number of memory accesses in a loop before promotion is disabled.
 241 unsigned LicmMssaNoAccForPromotionCap;
 // Whether hoisting is allowed to speculate instructions.
 242 bool LicmAllowSpeculation;
 243};
244
245struct LegacyLICMPass : public LoopPass {
246 static char ID; // Pass identification, replacement for typeid
247 LegacyLICMPass(
248 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
249 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
250 bool LicmAllowSpeculation = true)
251 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
252 LicmAllowSpeculation) {
254 }
255
256 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
257 if (skipLoop(L))
258 return false;
259
260 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
261 << L->getHeader()->getNameOrAsOperand() << "\n");
262
263 Function *F = L->getHeader()->getParent();
264
265 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
266 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
267 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
268 // pass. Function analyses need to be preserved across loop transformations
269 // but ORE cannot be preserved (see comment before the pass definition).
270 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
271 return LICM.runOnLoop(
272 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
273 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
274 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
275 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
276 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
277 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
278 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
279 }
280
281 /// This transformation requires natural loop information & requires that
282 /// loop preheaders be inserted into the CFG...
283 ///
284 void getAnalysisUsage(AnalysisUsage &AU) const override {
285 AU.addPreserved<DominatorTreeWrapperPass>();
286 AU.addPreserved<LoopInfoWrapperPass>();
287 AU.addRequired<TargetLibraryInfoWrapperPass>();
288 AU.addRequired<MemorySSAWrapperPass>();
289 AU.addPreserved<MemorySSAWrapperPass>();
290 AU.addRequired<TargetTransformInfoWrapperPass>();
291 AU.addRequired<AssumptionCacheTracker>();
294 AU.addPreserved<LazyBlockFrequencyInfoPass>();
295 AU.addPreserved<LazyBranchProbabilityInfoPass>();
296 }
297
298private:
299 LoopInvariantCodeMotion LICM;
300};
301} // namespace
302
305 if (!AR.MSSA)
306 reportFatalUsageError("LICM requires MemorySSA (loop-mssa)");
307
308 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
309 // pass. Function analyses need to be preserved across loop transformations
310 // but ORE cannot be preserved (see comment before the pass definition).
311 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
312
313 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
314 Opts.AllowSpeculation);
315 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
316 &AR.SE, AR.MSSA, &ORE))
317 return PreservedAnalyses::all();
318
320 PA.preserve<MemorySSAAnalysis>();
321
322 return PA;
323}
324
326 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
327 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
328 OS, MapClassName2PassName);
329
330 OS << '<';
331 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
332 OS << '>';
333}
334
337 LPMUpdater &) {
338 if (!AR.MSSA)
339 reportFatalUsageError("LNICM requires MemorySSA (loop-mssa)");
340
341 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
342 // pass. Function analyses need to be preserved across loop transformations
343 // but ORE cannot be preserved (see comment before the pass definition).
345
346 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
347 Opts.AllowSpeculation);
348
349 Loop &OutermostLoop = LN.getOutermostLoop();
350 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
351 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
352
353 if (!Changed)
354 return PreservedAnalyses::all();
355
357
358 PA.preserve<DominatorTreeAnalysis>();
359 PA.preserve<LoopAnalysis>();
360 PA.preserve<MemorySSAAnalysis>();
361
362 return PA;
363}
364
366 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
367 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
368 OS, MapClassName2PassName);
369
370 OS << '<';
371 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
372 OS << '>';
373}
374
375char LegacyLICMPass::ID = 0;
376INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
377 false, false)
383INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
384 false)
385
386Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
387
392
394 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
395 Loop &L, MemorySSA &MSSA)
398 IsSink(IsSink) {
399 unsigned AccessCapCount = 0;
400 for (auto *BB : L.getBlocks())
401 if (const auto *Accesses = MSSA.getBlockAccesses(BB))
402 for (const auto &MA : *Accesses) {
403 (void)MA;
404 ++AccessCapCount;
405 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
406 NoOfMemAccTooLarge = true;
407 return;
408 }
409 }
410}
411
412/// Hoist expressions out of the specified loop. Note, alias info for inner
413/// loop is not preserved so it is not a good idea to run LICM multiple
414/// times on one loop.
415bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
419 ScalarEvolution *SE, MemorySSA *MSSA,
421 bool LoopNestMode) {
422 bool Changed = false;
423
424 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
425
426 // If this loop has metadata indicating that LICM is not to be performed then
427 // just exit.
429 return false;
430 }
431
432 // Don't sink stores from loops with coroutine suspend instructions.
433 // LICM would sink instructions into the default destination of
434 // the coroutine switch. The default destination of the switch is to
435 // handle the case where the coroutine is suspended, by which point the
436 // coroutine frame may have been destroyed. No instruction can be sunk there.
437 // FIXME: This would unfortunately hurt the performance of coroutines, however
438 // there is currently no general solution for this. Similar issues could also
439 // potentially happen in other passes where instructions are being moved
440 // across that edge.
441 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
442 using namespace PatternMatch;
443 return any_of(make_pointer_range(*BB),
444 match_fn(m_Intrinsic<Intrinsic::coro_suspend>()));
445 });
446
447 MemorySSAUpdater MSSAU(MSSA);
448 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
449 /*IsSink=*/true, *L, *MSSA);
450
451 // Get the preheader block to move instructions into...
452 BasicBlock *Preheader = L->getLoopPreheader();
453
454 // Compute loop safety information.
455 ICFLoopSafetyInfo SafetyInfo;
456 SafetyInfo.computeLoopSafetyInfo(L);
457
458 // We want to visit all of the instructions in this loop... that are not parts
459 // of our subloops (they have already had their invariants hoisted out of
460 // their loop, into this loop, so there is no need to process the BODIES of
461 // the subloops).
462 //
463 // Traverse the body of the loop in depth first order on the dominator tree so
464 // that we are guaranteed to see definitions before we see uses. This allows
465 // us to sink instructions in one pass, without iteration. After sinking
466 // instructions, we perform another pass to hoist them out of the loop.
467 if (L->hasDedicatedExits())
468 Changed |=
469 LoopNestMode
470 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
471 TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
472 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
473 MSSAU, &SafetyInfo, Flags, ORE);
474 Flags.setIsSink(false);
475 if (Preheader)
476 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
477 MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
478 LicmAllowSpeculation);
479
480 // Now that all loop invariants have been removed from the loop, promote any
481 // memory references to scalars that we can.
482 // Don't sink stores from loops without dedicated block exits. Exits
483 // containing indirect branches are not transformed by loop simplify,
484 // make sure we catch that. An additional load may be generated in the
485 // preheader for SSA updater, so also avoid sinking when no preheader
486 // is available.
487 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
488 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
489 // Figure out the loop exits and their insertion points
490 SmallVector<BasicBlock *, 8> ExitBlocks;
491 L->getUniqueExitBlocks(ExitBlocks);
492
493 // We can't insert into a catchswitch.
494 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
495 return isa<CatchSwitchInst>(Exit->getTerminator());
496 });
497
498 if (!HasCatchSwitch) {
500 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
501 InsertPts.reserve(ExitBlocks.size());
502 MSSAInsertPts.reserve(ExitBlocks.size());
503 for (BasicBlock *ExitBlock : ExitBlocks) {
504 InsertPts.push_back(ExitBlock->getFirstInsertionPt());
505 MSSAInsertPts.push_back(nullptr);
506 }
507
509
510 // Promoting one set of accesses may make the pointers for another set
511 // loop invariant, so run this in a loop.
512 bool Promoted = false;
513 bool LocalPromoted;
514 do {
515 LocalPromoted = false;
516 for (auto [PointerMustAliases, HasReadsOutsideSet] :
517 collectPromotionCandidates(MSSA, AA, L)) {
518 LocalPromoted |= promoteLoopAccessesToScalars(
519 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
520 DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
521 LicmAllowSpeculation, HasReadsOutsideSet);
522 }
523 Promoted |= LocalPromoted;
524 } while (LocalPromoted);
525
526 // Once we have promoted values across the loop body we have to
527 // recursively reform LCSSA as any nested loop may now have values defined
528 // within the loop used in the outer loop.
529 // FIXME: This is really heavy handed. It would be a bit better to use an
530 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
531 // it as it went.
532 if (Promoted)
533 formLCSSARecursively(*L, *DT, LI, SE);
534
535 Changed |= Promoted;
536 }
537 }
538
539 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
540 // specifically moving instructions across the loop boundary and so it is
541 // especially in need of basic functional correctness checking here.
542 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
543 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
544 "Parent loop not left in LCSSA form after LICM!");
545
546 if (VerifyMemorySSA)
547 MSSA->verifyMemorySSA();
548
549 if (Changed && SE)
551 return Changed;
552}
553
554/// Walk the specified region of the CFG (defined by all blocks dominated by
555/// the specified block, and that are in the current loop) in reverse depth
556/// first order w.r.t the DominatorTree. This allows us to visit uses before
557/// definitions, allowing us to sink a loop body in one pass without iteration.
558///
561 TargetTransformInfo *TTI, Loop *CurLoop,
562 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
564 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
565
566 // Verify inputs.
567 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
568 CurLoop != nullptr && SafetyInfo != nullptr &&
569 "Unexpected input to sinkRegion.");
570
571 // We want to visit children before parents. We will enqueue all the parents
572 // before their children in the worklist and process the worklist in reverse
573 // order.
575 collectChildrenInLoop(DT, N, CurLoop);
576
577 bool Changed = false;
578 for (BasicBlock *BB : reverse(Worklist)) {
579 // subloop (which would already have been processed).
580 if (inSubLoop(BB, CurLoop, LI))
581 continue;
582
583 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
584 Instruction &I = *--II;
585
586 // The instruction is not used in the loop if it is dead. In this case,
587 // we just delete it instead of sinking it.
588 if (isInstructionTriviallyDead(&I, TLI)) {
589 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
592 ++II;
593 eraseInstruction(I, *SafetyInfo, MSSAU);
594 Changed = true;
595 continue;
596 }
597
598 // Check to see if we can sink this instruction to the exit blocks
599 // of the loop. We can do this if the all users of the instruction are
600 // outside of the loop. In this case, it doesn't even matter if the
601 // operands of the instruction are loop invariant.
602 //
603 bool FoldableInLoop = false;
604 bool LoopNestMode = OutermostLoop != nullptr;
605 if (!I.mayHaveSideEffects() &&
606 isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
607 SafetyInfo, TTI, FoldableInLoop,
608 LoopNestMode) &&
609 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
610 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
611 if (!FoldableInLoop) {
612 ++II;
614 eraseInstruction(I, *SafetyInfo, MSSAU);
615 }
616 Changed = true;
617 }
618 }
619 }
620 }
621 if (VerifyMemorySSA)
622 MSSAU.getMemorySSA()->verifyMemorySSA();
623 return Changed;
624}
625
628 TargetTransformInfo *TTI, Loop *CurLoop,
629 MemorySSAUpdater &MSSAU,
630 ICFLoopSafetyInfo *SafetyInfo,
633
634 bool Changed = false;
636 Worklist.insert(CurLoop);
637 appendLoopsToWorklist(*CurLoop, Worklist);
638 while (!Worklist.empty()) {
639 Loop *L = Worklist.pop_back_val();
640 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
641 MSSAU, SafetyInfo, Flags, ORE, CurLoop);
642 }
643 return Changed;
644}
645
646namespace {
647// This is a helper class for hoistRegion to make it able to hoist control flow
648// in order to be able to hoist phis. The way this works is that we initially
649// start hoisting to the loop preheader, and when we see a loop invariant branch
650// we make note of this. When we then come to hoist an instruction that's
651// conditional on such a branch we duplicate the branch and the relevant control
652// flow, then hoist the instruction into the block corresponding to its original
653// block in the duplicated control flow.
654class ControlFlowHoister {
655private:
656 // Information about the loop we are hoisting from
657 LoopInfo *LI;
658 DominatorTree *DT;
659 Loop *CurLoop;
660 MemorySSAUpdater &MSSAU;
661
662 // A map of blocks in the loop to the block their instructions will be hoisted
663 // to.
664 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
665
666 // The branches that we can hoist, mapped to the block that marks a
667 // convergence point of their control flow.
668 DenseMap<CondBrInst *, BasicBlock *> HoistableBranches;
669
670public:
671 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
672 MemorySSAUpdater &MSSAU)
673 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
674
675 void registerPossiblyHoistableBranch(CondBrInst *BI) {
676 // We can only hoist conditional branches with loop invariant operands.
677 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(BI))
678 return;
679
680 // The branch destinations need to be in the loop, and we don't gain
681 // anything by duplicating conditional branches with duplicate successors,
682 // as it's essentially the same as an unconditional branch.
683 BasicBlock *TrueDest = BI->getSuccessor(0);
684 BasicBlock *FalseDest = BI->getSuccessor(1);
685 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
686 TrueDest == FalseDest)
687 return;
688
689 // We can hoist BI if one branch destination is the successor of the other,
690 // or both have common successor which we check by seeing if the
691 // intersection of their successors is non-empty.
692 // TODO: This could be expanded to allowing branches where both ends
693 // eventually converge to a single block.
694 SmallPtrSet<BasicBlock *, 4> TrueDestSucc(llvm::from_range,
695 successors(TrueDest));
696 SmallPtrSet<BasicBlock *, 4> FalseDestSucc(llvm::from_range,
697 successors(FalseDest));
698 BasicBlock *CommonSucc = nullptr;
699 if (TrueDestSucc.count(FalseDest)) {
700 CommonSucc = FalseDest;
701 } else if (FalseDestSucc.count(TrueDest)) {
702 CommonSucc = TrueDest;
703 } else {
704 set_intersect(TrueDestSucc, FalseDestSucc);
705 // If there's one common successor use that.
706 if (TrueDestSucc.size() == 1)
707 CommonSucc = *TrueDestSucc.begin();
708 // If there's more than one pick whichever appears first in the block list
709 // (we can't use the value returned by TrueDestSucc.begin() as it's
710 // unpredicatable which element gets returned).
711 else if (!TrueDestSucc.empty()) {
712 Function *F = TrueDest->getParent();
713 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
714 auto It = llvm::find_if(*F, IsSucc);
715 assert(It != F->end() && "Could not find successor in function");
716 CommonSucc = &*It;
717 }
718 }
719 // The common successor has to be dominated by the branch, as otherwise
720 // there will be some other path to the successor that will not be
721 // controlled by this branch so any phi we hoist would be controlled by the
722 // wrong condition. This also takes care of avoiding hoisting of loop back
723 // edges.
724 // TODO: In some cases this could be relaxed if the successor is dominated
725 // by another block that's been hoisted and we can guarantee that the
726 // control flow has been replicated exactly.
727 if (CommonSucc && DT->dominates(BI, CommonSucc))
728 HoistableBranches[BI] = CommonSucc;
729 }
730
731 bool canHoistPHI(PHINode *PN) {
732 // The phi must have loop invariant operands.
733 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
734 return false;
735 // We can hoist phis if the block they are in is the target of hoistable
736 // branches which cover all of the predecessors of the block.
737 BasicBlock *BB = PN->getParent();
738 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks(llvm::from_range,
739 predecessors(BB));
740 // If we have less predecessor blocks than predecessors then the phi will
741 // have more than one incoming value for the same block which we can't
742 // handle.
743 // TODO: This could be handled be erasing some of the duplicate incoming
744 // values.
745 if (PredecessorBlocks.size() != pred_size(BB))
746 return false;
747 for (auto &Pair : HoistableBranches) {
748 if (Pair.second == BB) {
749 // Which blocks are predecessors via this branch depends on if the
750 // branch is triangle-like or diamond-like.
751 if (Pair.first->getSuccessor(0) == BB) {
752 PredecessorBlocks.erase(Pair.first->getParent());
753 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
754 } else if (Pair.first->getSuccessor(1) == BB) {
755 PredecessorBlocks.erase(Pair.first->getParent());
756 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
757 } else {
758 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
759 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
760 }
761 }
762 }
763 // PredecessorBlocks will now be empty if for every predecessor of BB we
764 // found a hoistable branch source.
765 return PredecessorBlocks.empty();
766 }
767
768 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
770 return CurLoop->getLoopPreheader();
771 // If BB has already been hoisted, return that
772 if (auto It = HoistDestinationMap.find(BB); It != HoistDestinationMap.end())
773 return It->second;
774
775 // Check if this block is conditional based on a pending branch
776 auto HasBBAsSuccessor =
777 [&](DenseMap<CondBrInst *, BasicBlock *>::value_type &Pair) {
778 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
779 Pair.first->getSuccessor(1) == BB);
780 };
781 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
782
783 // If not involved in a pending branch, hoist to preheader
784 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
785 if (It == HoistableBranches.end()) {
786 LLVM_DEBUG(dbgs() << "LICM using "
787 << InitialPreheader->getNameOrAsOperand()
788 << " as hoist destination for "
789 << BB->getNameOrAsOperand() << "\n");
790 HoistDestinationMap[BB] = InitialPreheader;
791 return InitialPreheader;
792 }
793 CondBrInst *BI = It->first;
794 assert(std::none_of(std::next(It), HoistableBranches.end(),
795 HasBBAsSuccessor) &&
796 "BB is expected to be the target of at most one branch");
797
798 LLVMContext &C = BB->getContext();
799 BasicBlock *TrueDest = BI->getSuccessor(0);
800 BasicBlock *FalseDest = BI->getSuccessor(1);
801 BasicBlock *CommonSucc = HoistableBranches[BI];
802 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
803
804 // Create hoisted versions of blocks that currently don't have them
805 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
806 auto [It, Inserted] = HoistDestinationMap.try_emplace(Orig);
807 if (!Inserted)
808 return It->second;
809 BasicBlock *New =
810 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
811 It->second = New;
812 DT->addNewBlock(New, HoistTarget);
813 if (CurLoop->getParentLoop())
814 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
815 ++NumCreatedBlocks;
816 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
817 << " as hoist destination for " << Orig->getName()
818 << "\n");
819 return New;
820 };
821 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
822 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
823 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
824
825 // Link up these blocks with branches.
826 if (!HoistCommonSucc->getTerminator()) {
827 // The new common successor we've generated will branch to whatever that
828 // hoist target branched to.
829 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
830 assert(TargetSucc && "Expected hoist target to have a single successor");
831 HoistCommonSucc->moveBefore(TargetSucc);
832 UncondBrInst::Create(TargetSucc, HoistCommonSucc);
833 }
834 if (!HoistTrueDest->getTerminator()) {
835 HoistTrueDest->moveBefore(HoistCommonSucc);
836 UncondBrInst::Create(HoistCommonSucc, HoistTrueDest);
837 }
838 if (!HoistFalseDest->getTerminator()) {
839 HoistFalseDest->moveBefore(HoistCommonSucc);
840 UncondBrInst::Create(HoistCommonSucc, HoistFalseDest);
841 }
842
843 // If BI is being cloned to what was originally the preheader then
844 // HoistCommonSucc will now be the new preheader.
845 if (HoistTarget == InitialPreheader) {
846 // Phis in the loop header now need to use the new preheader.
847 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
849 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
850 // The new preheader dominates the loop header.
851 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
852 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
853 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
854 // The preheader hoist destination is now the new preheader, with the
855 // exception of the hoist destination of this branch.
856 for (auto &Pair : HoistDestinationMap)
857 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
858 Pair.second = HoistCommonSucc;
859 }
860
861 // Now finally clone BI.
862 auto *NewBI =
863 CondBrInst::Create(BI->getCondition(), HoistTrueDest, HoistFalseDest,
864 HoistTarget->getTerminator()->getIterator());
865 HoistTarget->getTerminator()->eraseFromParent();
866 // md_prof should also come from the original branch - since the
867 // condition was hoisted, the branch probabilities shouldn't change.
869 NewBI->copyMetadata(*BI, {LLVMContext::MD_prof});
870 // FIXME: Issue #152767: debug info should also be the same as the
871 // original branch, **if** the user explicitly indicated that.
872 NewBI->setDebugLoc(HoistTarget->getTerminator()->getDebugLoc());
873
874 ++NumClonedBranches;
875
876 assert(CurLoop->getLoopPreheader() &&
877 "Hoisting blocks should not have destroyed preheader");
878 return HoistDestinationMap[BB];
879 }
880};
881} // namespace
882
883/// Walk the specified region of the CFG (defined by all blocks dominated by
884/// the specified block, and that are in the current loop) in depth first
885/// order w.r.t the DominatorTree. This allows us to visit definitions before
886/// uses, allowing us to hoist a loop body in one pass without iteration.
887///
890 TargetLibraryInfo *TLI, Loop *CurLoop,
892 ICFLoopSafetyInfo *SafetyInfo,
894 OptimizationRemarkEmitter *ORE, bool LoopNestMode,
895 bool AllowSpeculation) {
896 // Verify inputs.
897 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
898 CurLoop != nullptr && SafetyInfo != nullptr &&
899 "Unexpected input to hoistRegion.");
900
901 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
902
903 // Keep track of instructions that have been hoisted, as they may need to be
904 // re-hoisted if they end up not dominating all of their uses.
905 SmallVector<Instruction *, 16> HoistedInstructions;
906
907 // For PHI hoisting to work we need to hoist blocks before their successors.
908 // We can do this by iterating through the blocks in the loop in reverse
909 // post-order.
910 LoopBlocksRPO Worklist(CurLoop);
911 Worklist.perform(LI);
912 bool Changed = false;
913 BasicBlock *Preheader = CurLoop->getLoopPreheader();
914 for (BasicBlock *BB : Worklist) {
915 // Only need to process the contents of this block if it is not part of a
916 // subloop (which would already have been processed).
917 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
918 continue;
919
921 // Try hoisting the instruction out to the preheader. We can only do
922 // this if all of the operands of the instruction are loop invariant and
923 // if it is safe to hoist the instruction.
924 // TODO: It may be safe to hoist if we are hoisting to a conditional block
925 // and we have accurately duplicated the control flow from the loop header
926 // to that block.
927 if (CurLoop->hasLoopInvariantOperands(&I) &&
928 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
929 isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, ORE,
930 Preheader->getTerminator(), AC,
931 AllowSpeculation)) {
932 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
933 MSSAU, SE, ORE);
934 HoistedInstructions.push_back(&I);
935 Changed = true;
936 continue;
937 }
938
939 // Attempt to remove floating point division out of the loop by
940 // converting it to a reciprocal multiplication.
941 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
942 CurLoop->isLoopInvariant(I.getOperand(1))) {
943 auto Divisor = I.getOperand(1);
944 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
945 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
946 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
947 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
948 ReciprocalDivisor->insertBefore(I.getIterator());
949 ReciprocalDivisor->setDebugLoc(I.getDebugLoc());
950
951 auto Product =
952 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
953 Product->setFastMathFlags(I.getFastMathFlags());
954 SafetyInfo->insertInstructionTo(Product, I.getParent());
955 Product->insertAfter(I.getIterator());
956 Product->setDebugLoc(I.getDebugLoc());
957 I.replaceAllUsesWith(Product);
958 eraseInstruction(I, *SafetyInfo, MSSAU);
959
960 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
961 SafetyInfo, MSSAU, SE, ORE);
962 HoistedInstructions.push_back(ReciprocalDivisor);
963 Changed = true;
964 continue;
965 }
966
967 auto IsInvariantStart = [&](Instruction &I) {
968 using namespace PatternMatch;
969 return I.use_empty() &&
971 };
972 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
973 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
974 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
975 };
976 if ((IsInvariantStart(I) || isGuard(&I)) &&
977 CurLoop->hasLoopInvariantOperands(&I) &&
978 MustExecuteWithoutWritesBefore(I)) {
979 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
980 MSSAU, SE, ORE);
981 HoistedInstructions.push_back(&I);
982 Changed = true;
983 continue;
984 }
985
986 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
987 if (CFH.canHoistPHI(PN)) {
988 // Redirect incoming blocks first to ensure that we create hoisted
989 // versions of those blocks before we hoist the phi.
990 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
992 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
993 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
994 MSSAU, SE, ORE);
995 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
996 Changed = true;
997 continue;
998 }
999 }
1000
1001 // Try to reassociate instructions so that part of computations can be
1002 // done out of loop.
1003 if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
1004 Changed = true;
1005 continue;
1006 }
1007
1008 // Remember possibly hoistable branches so we can actually hoist them
1009 // later if needed.
1010 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
1011 CFH.registerPossiblyHoistableBranch(BI);
1012 }
1013 }
1014
1015 // If we hoisted instructions to a conditional block they may not dominate
1016 // their uses that weren't hoisted (such as phis where some operands are not
1017 // loop invariant). If so make them unconditional by moving them to their
1018 // immediate dominator. We iterate through the instructions in reverse order
1019 // which ensures that when we rehoist an instruction we rehoist its operands,
1020 // and also keep track of where in the block we are rehoisting to make sure
1021 // that we rehoist instructions before the instructions that use them.
1022 Instruction *HoistPoint = nullptr;
1023 if (ControlFlowHoisting) {
1024 for (Instruction *I : reverse(HoistedInstructions)) {
1025 if (!llvm::all_of(I->uses(),
1026 [&](Use &U) { return DT->dominates(I, U); })) {
1027 BasicBlock *Dominator =
1028 DT->getNode(I->getParent())->getIDom()->getBlock();
1029 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1030 if (HoistPoint)
1031 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1032 "New hoist point expected to dominate old hoist point");
1033 HoistPoint = Dominator->getTerminator();
1034 }
1035 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1036 << HoistPoint->getParent()->getNameOrAsOperand()
1037 << ": " << *I << "\n");
1038 moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
1039 SE);
1040 HoistPoint = I;
1041 Changed = true;
1042 }
1043 }
1044 }
1045 if (VerifyMemorySSA)
1046 MSSAU.getMemorySSA()->verifyMemorySSA();
1047
1048 // Now that we've finished hoisting make sure that LI and DT are still
1049 // valid.
1050#ifdef EXPENSIVE_CHECKS
1051 if (Changed) {
1052 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1053 "Dominator tree verification failed");
1054 LI->verify(*DT);
1055 }
1056#endif
1057
1058 return Changed;
1059}
1060
1061// Return true if LI is invariant within scope of the loop. LI is invariant if
1062// CurLoop is dominated by an invariant.start representing the same memory
1063// location and size as the memory location LI loads from, and also the
1064// invariant.start has no uses.
1066 Loop *CurLoop) {
1067 Value *Addr = LI->getPointerOperand();
1068 const DataLayout &DL = LI->getDataLayout();
1069 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1070
1071 // It is not currently possible for clang to generate an invariant.start
1072 // intrinsic with scalable vector types because we don't support thread local
1073 // sizeless types and we don't permit sizeless types in structs or classes.
1074 // Furthermore, even if support is added for this in future the intrinsic
1075 // itself is defined to have a size of -1 for variable sized objects. This
1076 // makes it impossible to verify if the intrinsic envelops our region of
1077 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1078 // types would have a -1 parameter, but the former is clearly double the size
1079 // of the latter.
1080 if (LocSizeInBits.isScalable())
1081 return false;
1082
1083 // If we've ended up at a global/constant, bail. We shouldn't be looking at
1084 // uselists for non-local Values in a loop pass.
1085 if (isa<Constant>(Addr))
1086 return false;
1087
1088 unsigned UsesVisited = 0;
1089 // Traverse all uses of the load operand value, to see if invariant.start is
1090 // one of the uses, and whether it dominates the load instruction.
1091 for (auto *U : Addr->users()) {
1092 // Avoid traversing for Load operand with high number of users.
1093 if (++UsesVisited > MaxNumUsesTraversed)
1094 return false;
1096 // If there are escaping uses of invariant.start instruction, the load maybe
1097 // non-invariant.
1098 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1099 !II->use_empty())
1100 continue;
1101 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1102 // The intrinsic supports having a -1 argument for variable sized objects
1103 // so we should check for that here.
1104 if (InvariantSize->isNegative())
1105 continue;
1106 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1107 // Confirm the invariant.start location size contains the load operand size
1108 // in bits. Also, the invariant.start should dominate the load, and we
1109 // should not hoist the load out of a loop that contains this dominating
1110 // invariant.start.
1111 if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
1112 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1113 return true;
1114 }
1115
1116 return false;
1117}
1118
1119/// Return true if-and-only-if we know how to (mechanically) both hoist and
1120/// sink a given instruction out of a loop. Does not address legality
1121/// concerns such as aliasing or speculation safety.
1132
1133/// Return true if I is the only Instruction with a MemoryAccess in L.
1134static bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1135 const MemorySSAUpdater &MSSAU) {
1136 for (auto *BB : L->getBlocks())
1137 if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
1138 int NotAPhi = 0;
1139 for (const auto &Acc : *Accs) {
1140 if (isa<MemoryPhi>(&Acc))
1141 continue;
1142 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1143 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1144 return false;
1145 }
1146 }
1147 return true;
1148}
1149
1151 BatchAAResults &BAA,
1152 SinkAndHoistLICMFlags &Flags,
1153 MemoryUseOrDef *MA) {
1154 // See declaration of SetLicmMssaOptCap for usage details.
1155 if (Flags.tooManyClobberingCalls())
1156 return MA->getDefiningAccess();
1157
1158 MemoryAccess *Source =
1160 Flags.incrementClobberingCalls();
1161 return Source;
1162}
1163
1165 Loop *CurLoop, MemorySSAUpdater &MSSAU,
1166 bool TargetExecutesOncePerLoop,
1167 SinkAndHoistLICMFlags &Flags,
1169 // If we don't understand the instruction, bail early.
1171 return false;
1172
1173 MemorySSA *MSSA = MSSAU.getMemorySSA();
1174 // Loads have extra constraints we have to verify before we can hoist them.
1175 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1176 if (!LI->isUnordered())
1177 return false; // Don't sink/hoist volatile or ordered atomic loads!
1178
1179 // Loads from constant memory are always safe to move, even if they end up
1180 // in the same alias set as something that ends up being modified.
1181 if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
1182 return true;
1183 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1184 return true;
1185
1186 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1187 return false; // Don't risk duplicating unordered loads
1188
1189 // This checks for an invariant.start dominating the load.
1190 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1191 return true;
1192
1193 auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));
1194
1195 bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);
1196
1197 bool Invalidated = pointerInvalidatedByLoop(
1198 MSSA, MU, CurLoop, I, Flags, InvariantGroup);
1199 // Check loop-invariant address because this may also be a sinkable load
1200 // whose address is not necessarily loop-invariant.
1201 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1202 ORE->emit([&]() {
1204 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1205 << "failed to move load with loop-invariant address "
1206 "because the loop may invalidate its value";
1207 });
1208
1209 return !Invalidated;
1210 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1211 // Don't sink calls which can throw.
1212 if (CI->mayThrow())
1213 return false;
1214
1215 // Convergent attribute has been used on operations that involve
1216 // inter-thread communication which results are implicitly affected by the
1217 // enclosing control flows. It is not safe to hoist or sink such operations
1218 // across control flow.
1219 if (CI->isConvergent())
1220 return false;
1221
1222 // FIXME: Current LLVM IR semantics don't work well with coroutines and
1223 // thread local globals. We currently treat getting the address of a thread
1224 // local global as not accessing memory, even though it may not be a
1225 // constant throughout a function with coroutines. Remove this check after
1226 // we better model semantics of thread local globals.
1227 if (CI->getFunction()->isPresplitCoroutine())
1228 return false;
1229
1230 using namespace PatternMatch;
1232 // Assumes don't actually alias anything or throw
1233 return true;
1234
1235 // Handle simple cases by querying alias analysis.
1236 MemoryEffects Behavior = AA->getMemoryEffects(CI);
1237
1238 if (Behavior.doesNotAccessMemory())
1239 return true;
1240 if (Behavior.onlyReadsMemory()) {
1241 // Might have stale MemoryDef for call that was later inferred to be
1242 // read-only.
1243 auto *MU = dyn_cast<MemoryUse>(MSSA->getMemoryAccess(CI));
1244 if (!MU)
1245 return false;
1246
1247 // If we can prove there are no writes to the memory read by the call, we
1248 // can hoist or sink.
1250 MSSA, MU, CurLoop, I, Flags, /*InvariantGroup=*/false);
1251 }
1252
1253 if (Behavior.onlyWritesMemory()) {
1254 // can hoist or sink if there are no conflicting read/writes to the
1255 // memory location written to by the call.
1256 return noConflictingReadWrites(CI, MSSA, AA, CurLoop, Flags);
1257 }
1258
1259 return false;
1260 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1261 // Fences alias (most) everything to provide ordering. For the moment,
1262 // just give up if there are any other memory operations in the loop.
1263 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1264 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1265 if (!SI->isUnordered())
1266 return false; // Don't sink/hoist volatile or ordered atomic store!
1267
1268 // We can only hoist a store that we can prove writes a value which is not
1269 // read or overwritten within the loop. For those cases, we fallback to
1270 // load store promotion instead. TODO: We can extend this to cases where
1271 // there is exactly one write to the location and that write dominates an
1272 // arbitrary number of reads in the loop.
1273 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1274 return true;
1275 return noConflictingReadWrites(SI, MSSA, AA, CurLoop, Flags);
1276 }
1277
1278 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1279
1280 // We've established mechanical ability and aliasing, it's up to the caller
1281 // to check fault safety
1282 return true;
1283}
1284
1285/// Returns true if a PHINode is a trivially replaceable with an
1286/// Instruction.
1287/// This is true when all incoming values are that instruction.
1288/// This pattern occurs most often with LCSSA PHI nodes.
1289///
1290static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1291 for (const Value *IncValue : PN.incoming_values())
1292 if (IncValue != &I)
1293 return false;
1294
1295 return true;
1296}
1297
1298/// Return true if the instruction is foldable in the loop.
1299static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1300 const TargetTransformInfo *TTI) {
1301 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1302 InstructionCost CostI =
1303 TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1304 if (CostI != TargetTransformInfo::TCC_Free)
1305 return false;
1306 // For a GEP, we cannot simply use getInstructionCost because currently
1307 // it optimistically assumes that a GEP will fold into addressing mode
1308 // regardless of its users.
1309 const BasicBlock *BB = GEP->getParent();
1310 for (const User *U : GEP->users()) {
1311 const Instruction *UI = cast<Instruction>(U);
1312 if (CurLoop->contains(UI) &&
1313 (BB != UI->getParent() ||
1314 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1315 return false;
1316 }
1317 return true;
1318 }
1319
1320 return false;
1321}
1322
1323/// Return true if the only users of this instruction are outside of
1324/// the loop. If this is true, we can sink the instruction to the exit
1325/// blocks of the loop.
1326///
1327/// We also return true if the instruction could be folded away in lowering.
1328/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1329static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1330 const LoopSafetyInfo *SafetyInfo,
1332 bool &FoldableInLoop, bool LoopNestMode) {
1333 const auto &BlockColors = SafetyInfo->getBlockColors();
1334 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
1335 for (const User *U : I.users()) {
1336 const Instruction *UI = cast<Instruction>(U);
1337 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1338 const BasicBlock *BB = PN->getParent();
1339 // We cannot sink uses in catchswitches.
1341 return false;
1342
1343 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1344 // phi use is too muddled.
1345 if (isa<CallInst>(I))
1346 if (!BlockColors.empty() &&
1347 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1348 return false;
1349
1350 if (LoopNestMode) {
1351 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1352 UI->getNumOperands() == 1) {
1353 if (!CurLoop->contains(UI))
1354 break;
1355 UI = cast<Instruction>(UI->user_back());
1356 }
1357 }
1358 }
1359
1360 if (CurLoop->contains(UI)) {
1361 if (IsFoldable) {
1362 FoldableInLoop = true;
1363 continue;
1364 }
1365 return false;
1366 }
1367 }
1368 return true;
1369}
1370
1372 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1373 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
1374 Instruction *New;
1375 if (auto *CI = dyn_cast<CallInst>(&I)) {
1376 const auto &BlockColors = SafetyInfo->getBlockColors();
1377
1378 // Sinking call-sites need to be handled differently from other
1379 // instructions. The cloned call-site needs a funclet bundle operand
1380 // appropriate for its location in the CFG.
1382 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1383 BundleIdx != BundleEnd; ++BundleIdx) {
1384 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1385 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1386 continue;
1387
1388 OpBundles.emplace_back(Bundle);
1389 }
1390
1391 if (!BlockColors.empty()) {
1392 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1393 assert(CV.size() == 1 && "non-unique color for exit block!");
1394 BasicBlock *BBColor = CV.front();
1395 BasicBlock::iterator EHPad = BBColor->getFirstNonPHIIt();
1396 if (EHPad->isEHPad())
1397 OpBundles.emplace_back("funclet", &*EHPad);
1398 }
1399
1400 New = CallInst::Create(CI, OpBundles);
1401 New->copyMetadata(*CI);
1402 } else {
1403 New = I.clone();
1404 }
1405
1406 New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
1407 if (!I.getName().empty())
1408 New->setName(I.getName() + ".le");
1409
1410 if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
1411 // Create a new MemoryAccess and let MemorySSA set its defining access.
1412 // After running some passes, MemorySSA might be outdated, and the
1413 // instruction `I` may have become a non-memory touching instruction.
1414 MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1415 New, nullptr, New->getParent(), MemorySSA::Beginning,
1416 /*CreationMustSucceed=*/false);
1417 if (NewMemAcc) {
1418 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1419 MSSAU.insertDef(MemDef, /*RenameUses=*/true);
1420 else {
1421 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1422 MSSAU.insertUse(MemUse, /*RenameUses=*/true);
1423 }
1424 }
1425 }
1426
1427 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1428 // this is particularly cheap because we can rip off the PHI node that we're
1429 // replacing for the number and blocks of the predecessors.
1430 // OPT: If this shows up in a profile, we can instead finish sinking all
1431 // invariant instructions, and then walk their operands to re-establish
1432 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1433 // sinking bottom-up.
1434 for (Use &Op : New->operands())
1435 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1436 auto *OInst = cast<Instruction>(Op.get());
1437 PHINode *OpPN =
1438 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1439 OInst->getName() + ".lcssa");
1440 OpPN->insertBefore(ExitBlock.begin());
1441 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1442 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1443 Op = OpPN;
1444 }
1445 return New;
1446}
1447
1449 MemorySSAUpdater &MSSAU) {
1450 MSSAU.removeMemoryAccess(&I);
1451 SafetyInfo.removeInstruction(&I);
1452 I.eraseFromParent();
1453}
1454
1456 ICFLoopSafetyInfo &SafetyInfo,
1457 MemorySSAUpdater &MSSAU,
1458 ScalarEvolution *SE) {
1459 SafetyInfo.removeInstruction(&I);
1460 SafetyInfo.insertInstructionTo(&I, Dest->getParent());
1461 I.moveBefore(*Dest->getParent(), Dest);
1463 MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1464 MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
1466 if (SE)
1468}
1469
1471 PHINode *TPN, Instruction *I, LoopInfo *LI,
1473 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1474 MemorySSAUpdater &MSSAU) {
1476 "Expect only trivially replaceable PHI");
1477 BasicBlock *ExitBlock = TPN->getParent();
1478 auto [It, Inserted] = SunkCopies.try_emplace(ExitBlock);
1479 if (Inserted)
1480 It->second = cloneInstructionInExitBlock(*I, *ExitBlock, *TPN, LI,
1481 SafetyInfo, MSSAU);
1482 return It->second;
1483}
1484
1485static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1486 BasicBlock *BB = PN->getParent();
1487 if (!BB->canSplitPredecessors())
1488 return false;
1489 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1490 // it require updating BlockColors for all offspring blocks accordingly. By
1491 // skipping such corner case, we can make updating BlockColors after splitting
1492 // predecessor fairly simple.
1493 if (!SafetyInfo->getBlockColors().empty() &&
1494 BB->getFirstNonPHIIt()->isEHPad())
1495 return false;
1496 for (BasicBlock *BBPred : predecessors(BB)) {
1497 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1498 return false;
1499 }
1500 return true;
1501}
1502
1504 LoopInfo *LI, const Loop *CurLoop,
1505 LoopSafetyInfo *SafetyInfo,
1506 MemorySSAUpdater *MSSAU) {
1507#ifndef NDEBUG
1509 CurLoop->getUniqueExitBlocks(ExitBlocks);
1510 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1511#endif
1512 BasicBlock *ExitBB = PN->getParent();
1513 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1514
1515 // Split predecessors of the loop exit to make instructions in the loop are
1516 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1517 // loop in the canonical form where each predecessor of each exit block should
1518 // be contained within the loop. For example, this will convert the loop below
1519 // from
1520 //
1521 // LB1:
1522 // %v1 =
1523 // br %LE, %LB2
1524 // LB2:
1525 // %v2 =
1526 // br %LE, %LB1
1527 // LE:
1528 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1529 //
1530 // to
1531 //
1532 // LB1:
1533 // %v1 =
1534 // br %LE.split, %LB2
1535 // LB2:
1536 // %v2 =
1537 // br %LE.split2, %LB1
1538 // LE.split:
1539 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1540 // br %LE
1541 // LE.split2:
1542 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1543 // br %LE
1544 // LE:
1545 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1546 //
1547 const auto &BlockColors = SafetyInfo->getBlockColors();
1548 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1549 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1550 while (!PredBBs.empty()) {
1551 BasicBlock *PredBB = *PredBBs.begin();
1552 assert(CurLoop->contains(PredBB) &&
1553 "Expect all predecessors are in the loop");
1554 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1556 ExitBB, PredBB, ".split.loop.exit", &DTU, LI, MSSAU, true);
1557 // Since we do not allow splitting EH-block with BlockColors in
1558 // canSplitPredecessors(), we can simply assign predecessor's color to
1559 // the new block.
1560 if (!BlockColors.empty())
1561 // Grab a reference to the ColorVector to be inserted before getting the
1562 // reference to the vector we are copying because inserting the new
1563 // element in BlockColors might cause the map to be reallocated.
1564 SafetyInfo->copyColors(NewPred, PredBB);
1565 }
1566 PredBBs.remove(PredBB);
1567 }
1568}
1569
1570/// When an instruction is found to only be used outside of the loop, this
1571/// function moves it to the exit blocks and patches up SSA form as needed.
1572/// This method is guaranteed to remove the original instruction from its
1573/// position, and may either delete it or move it to outside of the loop.
1574///
1575static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1576 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1578 bool Changed = false;
1579 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1580
1581 // Iterate over users to be ready for actual sinking. Replace users via
1582 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1583 SmallPtrSet<Instruction *, 8> VisitedUsers;
1584 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1585 auto *User = cast<Instruction>(*UI);
1586 Use &U = UI.getUse();
1587 ++UI;
1588
1589 if (VisitedUsers.count(User) || CurLoop->contains(User))
1590 continue;
1591
1592 if (!DT->isReachableFromEntry(User->getParent())) {
1593 U = PoisonValue::get(I.getType());
1594 Changed = true;
1595 continue;
1596 }
1597
1598 // The user must be a PHI node.
1599 PHINode *PN = cast<PHINode>(User);
1600
1601 // Surprisingly, instructions can be used outside of loops without any
1602 // exits. This can only happen in PHI nodes if the incoming block is
1603 // unreachable.
1604 BasicBlock *BB = PN->getIncomingBlock(U);
1605 if (!DT->isReachableFromEntry(BB)) {
1606 U = PoisonValue::get(I.getType());
1607 Changed = true;
1608 continue;
1609 }
1610
1611 VisitedUsers.insert(PN);
1612 if (isTriviallyReplaceablePHI(*PN, I))
1613 continue;
1614
1615 if (!canSplitPredecessors(PN, SafetyInfo))
1616 return Changed;
1617
1618 // Split predecessors of the PHI so that we can make users trivially
1619 // replaceable.
1620 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1621
1622 // Should rebuild the iterators, as they may be invalidated by
1623 // splitPredecessorsOfLoopExit().
1624 UI = I.user_begin();
1625 UE = I.user_end();
1626 }
1627
1628 if (VisitedUsers.empty())
1629 return Changed;
1630
1631 ORE->emit([&]() {
1632 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1633 << "sinking " << ore::NV("Inst", &I);
1634 });
1635 if (isa<LoadInst>(I))
1636 ++NumMovedLoads;
1637 else if (isa<CallInst>(I))
1638 ++NumMovedCalls;
1639 ++NumSunk;
1640
1641#ifndef NDEBUG
1643 CurLoop->getUniqueExitBlocks(ExitBlocks);
1644 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1645#endif
1646
1647 // Clones of this instruction. Don't create more than one per exit block!
1649
1650 // If this instruction is only used outside of the loop, then all users are
1651 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1652 // the instruction.
1653 // First check if I is worth sinking for all uses. Sink only when it is worth
1654 // across all uses.
1655 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1656 for (auto *UI : Users) {
1657 auto *User = cast<Instruction>(UI);
1658
1659 if (CurLoop->contains(User))
1660 continue;
1661
1662 PHINode *PN = cast<PHINode>(User);
1663 assert(ExitBlockSet.count(PN->getParent()) &&
1664 "The LCSSA PHI is not in an exit block!");
1665
1666 // The PHI must be trivially replaceable.
1668 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1669 // As we sink the instruction out of the BB, drop its debug location.
1670 New->dropLocation();
1671 PN->replaceAllUsesWith(New);
1672 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1673 Changed = true;
1674 }
1675 return Changed;
1676}
1677
1678/// When an instruction is found to only use loop invariant operands that
1679/// is safe to hoist, this instruction is called to do the dirty work.
1680///
/// Moves \p I into \p Dest (typically the loop preheader), stripping
/// UB-implying metadata/attributes when the instruction is not guaranteed to
/// execute in the loop, and updates the hoisting statistics.
1681static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1682                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  // NOTE(review): the remaining parameters (presumably MSSAU,
                  // SE and ORE, judging by the uses below) are on lines elided
                  // from this view — confirm against the full source.
1685  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1686                    << I << "\n");
  // Record the hoist in the optimization remarks stream.
1687  ORE->emit([&]() {
1688    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1689                                                         << ore::NV("Inst", &I);
1690  });
1691
1692  // Metadata can be dependent on conditions we are hoisting above.
1693  // Conservatively strip all metadata on the instruction unless we were
1694  // guaranteed to execute I if we entered the loop, in which case the metadata
1695  // is valid in the loop preheader.
1696  // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1697  // then moving to the preheader means we should strip attributes on the call
1698  // that can cause UB since we may be hoisting above conditions that allowed
1699  // inferring those attributes. They may not be valid at the preheader.
1700  if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1701      // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1702      // time in isGuaranteedToExecute if we don't actually have anything to
1703      // drop.  It is a compile time optimization, not required for correctness.
1704      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) {
1705    I.dropUBImplyingAttrsAndMetadata();
1706  }
1707
  // PHIs must stay grouped at the top of the block; everything else goes just
  // before the terminator of the destination block.
1708  if (isa<PHINode>(I))
1709    // Move the new node to the end of the phi list in the destination block.
1710    moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1711  else
1712    // Move the new node to the destination block, before its terminator.
1713    moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1714                          MSSAU, SE);
1715
1716  I.updateLocationAfterHoist();
1717
  // Statistics: count loads and calls separately, plus the overall total.
1718  if (isa<LoadInst>(I))
1719    ++NumMovedLoads;
1720  else if (isa<CallInst>(I))
1721    ++NumMovedCalls;
1722  ++NumHoisted;
1723}
1724
1725/// Only sink or hoist an instruction if it is not a trapping instruction,
1726/// or if the instruction is known not to trap when moved to the preheader,
1727/// or if it is a trapping instruction and is guaranteed to execute.
/// Returns true when executing the instruction at \p CtxI (the hoist point)
/// cannot introduce a trap that the original program would not have had.
// NOTE(review): the function's declaration line is elided from this view;
// the parameter list below is its continuation.
1729    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1730    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1731    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1732    AssumptionCache *AC, bool AllowSpeculation) {
  // Cheapest win: the instruction is speculatable at the hoist point.
1733  if (AllowSpeculation &&
1734      isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1735    return true;
1736
  // Otherwise it is only safe if the loop must execute it on every entry.
1737  bool GuaranteedToExecute =
1738      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1739
1740  if (!GuaranteedToExecute) {
    // Emit a missed-optimization remark for conditionally executed loads of
    // loop-invariant addresses — a common pattern users expect to be hoisted.
1741    auto *LI = dyn_cast<LoadInst>(&Inst);
1742    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1743      ORE->emit([&]() {
        // NOTE(review): the remark's constructor line is elided from this
        // view (presumably OptimizationRemarkMissed) — confirm in full source.
1745            DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1746               << "failed to hoist load with loop-invariant address "
1747                  "because load is conditionally executed";
1748      });
1749  }
1750
1751  return GuaranteedToExecute;
1752}
1753
1754namespace {
/// Helper used by scalar promotion: rewrites all loads/stores of a promoted
/// must-alias pointer set through SSAUpdater, and materializes the final
/// stores in the loop exit blocks (when that is legal).
1755class LoopPromoter : public LoadAndStorePromoter {
1756  Value *SomePtr; // Designated pointer to store to.
1757  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1758  SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1759  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1760  PredIteratorCache &PredCache;
1761  MemorySSAUpdater &MSSAU;
1762  LoopInfo &LI;
1763  DebugLoc DL;          // Merged debug location for the inserted stores.
1764  Align Alignment;      // Alignment to use on the promoted accesses.
1765  bool UnorderedAtomic; // True if the original accesses were unordered atomic.
1766  AAMDNodes AATags;
1767  ICFLoopSafetyInfo &SafetyInfo;
1768  bool CanInsertStoresInExitBlocks;
  // NOTE(review): a member declaration (the `Uses` range initialized from
  // Insts in the constructor below) is on a line elided from this view.
1770
1771  // We're about to add a use of V in a loop exit block.  Insert an LCSSA phi
1772  // (if legal) if doing so would add an out-of-loop use to an instruction
1773  // defined in-loop.
1774  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1775    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1776      return V;
1777
    // NOTE(review): the cast of V to an Instruction (defining `I` used below)
    // is on a line elided from this view.
1779    // We need to create an LCSSA PHI node for the incoming value and
1780    // store that.
1781    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1782                                  I->getName() + ".lcssa");
1783    PN->insertBefore(BB->begin());
1784    for (BasicBlock *Pred : PredCache.get(BB))
1785      PN->addIncoming(I, Pred);
1786    return PN;
1787  }
1788
1789public:
1790  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1791               SmallVectorImpl<BasicBlock *> &LEB,
1792               SmallVectorImpl<BasicBlock::iterator> &LIP,
1793               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1794               MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1795               Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1796               ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1797      : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1798        LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1799        LI(li), DL(std::move(dl)), Alignment(Alignment),
1800        UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1801        SafetyInfo(SafetyInfo),
1802        CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}

  /// Materialize one store of the promoted scalar per loop exit block,
  /// keeping MemorySSA and DIAssignID debug-info metadata up to date.
1804  void insertStoresInLoopExitBlocks() {
1805    // Insert stores after in the loop exit blocks.  Each exit block gets a
1806    // store of the live-out values that feed them.  Since we've already told
1807    // the SSA updater about the defs in the loop and the preheader
1808    // definition, it is all set and we can start using it.
1809    DIAssignID *NewID = nullptr;
1810    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1811      BasicBlock *ExitBlock = LoopExitBlocks[i];
1812      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      // Both the stored value and the pointer may need LCSSA phis in the
      // exit block if they are defined inside the loop.
1813      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1814      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1815      BasicBlock::iterator InsertPos = LoopInsertPts[i];
1816      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1817      if (UnorderedAtomic)
1818        NewSI->setOrdering(AtomicOrdering::Unordered);
1819      NewSI->setAlignment(Alignment);
1820      NewSI->setDebugLoc(DL);
1821      // Attach DIAssignID metadata to the new store, generating it on the
1822      // first loop iteration.
1823      if (i == 0) {
1824        // NewSI will have its DIAssignID set here if there are any stores in
1825        // Uses with a DIAssignID attachment.  This merged ID will then be
1826        // attached to the other inserted stores (in the branch below).
1827        NewSI->mergeDIAssignID(Uses);
        // NOTE(review): the assignment of NewID (from the merged DIAssignID
        // metadata read below) is on a line elided from this view.
1829            NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1830      } else {
1831        // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1832        // above.
1833        NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1834      }
1835
1836      if (AATags)
1837        NewSI->setAAMetadata(AATags);
1838
      // Thread the new store into MemorySSA at (or after) the recorded
      // insertion point for this exit block.
1839      MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1840      MemoryAccess *NewMemAcc;
1841      if (!MSSAInsertPoint) {
1842        NewMemAcc = MSSAU.createMemoryAccessInBB(
1843            NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1844      } else {
1845        NewMemAcc =
1846            MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1847      }
1848      MSSAInsertPts[i] = NewMemAcc;
1849      MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1850      // FIXME: true for safety, false may still be correct.
1851    }
1852  }

1854  void doExtraRewritesBeforeFinalDeletion() override {
1855    if (CanInsertStoresInExitBlocks)
1856      insertStoresInLoopExitBlocks();
1857  }

  // Keep the safety info and MemorySSA consistent when the promoter deletes
  // one of the original accesses.
1859  void instructionDeleted(Instruction *I) const override {
1860    SafetyInfo.removeInstruction(I);
1861    MSSAU.removeMemoryAccess(I);
1862  }

  // Original stores may only be deleted if we were allowed to re-create them
  // in the exit blocks; loads are always replaced and deletable.
1864  bool shouldDelete(Instruction *I) const override {
1865    if (isa<StoreInst>(I))
1866      return CanInsertStoresInExitBlocks;
1867    return true;
1868  }
1869};
1870
/// Return true if \p V is provably not captured before or inside loop \p L.
/// Used to establish that no other thread / the caller can observe the
/// object while the loop runs.
1871bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1872                                 DominatorTree *DT) {
1873  // We can perform the captured-before check against any instruction in the
1874  // loop header, as the loop header is reachable from any instruction inside
1875  // the loop.
1876  // TODO: ReturnCaptures=true shouldn't be necessary here.
  // NOTE(review): the `return` expression's opening (presumably a negated
  // capture query wrapping the call below) is on a line elided from this view.
1878      V, /*ReturnCaptures=*/true, L->getHeader()->getTerminator(), DT,
1879      /*IncludeI=*/false, CaptureComponents::Provenance));
1880}
1881
1882/// Return true if we can prove that a caller cannot inspect the object if an
1883/// unwind occurs inside the loop.
1884bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1885 DominatorTree *DT) {
1886 bool RequiresNoCaptureBeforeUnwind;
1887 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1888 return false;
1889
1890 return !RequiresNoCaptureBeforeUnwind ||
1891 isNotCapturedBeforeOrInLoop(Object, L, DT);
1892}
1893
/// Return true if stores to \p Object cannot be observed by other threads
/// while loop \p L executes: either the object is function-local and never
/// captured before/in the loop, or the whole program is single-threaded.
1894bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
                         // NOTE(review): the remaining parameter(s) (TTI, and
                         // presumably the SingleThread flag's origin) are on a
                         // line elided from this view.
1896  // The object must be function-local to start with, and then not captured
1897  // before/in the loop.
1898  return (isIdentifiedFunctionLocal(Object) &&
1899          isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
1900         (TTI->isSingleThreaded() || SingleThread);
1901}
1902
1903} // namespace
1904
1905/// Try to promote memory values to scalars by sinking stores out of the
1906/// loop and moving loads to before the loop.  We do this by looping over
1907/// the stores in the loop, looking for stores to Must pointers which are
1908/// loop invariant.
1909///
/// Returns true if any promotion was performed.  On success, loads of the
/// promoted location inside the loop are replaced by SSA values seeded from a
/// preheader load (when needed), and stores are re-materialized in the exit
/// blocks (when provably safe).
// NOTE(review): the function's declaration line and several reference
// parameters (the exit blocks, insertion points, PIC, LI, DT, AC judging by
// the uses below) are on lines elided from this view.
1911    const SmallSetVector<Value *, 8> &PointerMustAliases,
1916    const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1917    MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1918    OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1919    bool HasReadsOutsideSet) {
1920  // Verify inputs.
1921  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1922         SafetyInfo != nullptr &&
1923         "Unexpected Input to promoteLoopAccessesToScalars");
1924
1925  LLVM_DEBUG({
1926    dbgs() << "Trying to promote set of must-aliased pointers:\n";
1927    for (Value *Ptr : PointerMustAliases)
1928      dbgs() << "  " << *Ptr << "\n";
1929  });
1930  ++NumPromotionCandidates;
1931
  // All pointers in the set must-alias, so any representative will do.
1932  Value *SomePtr = *PointerMustAliases.begin();
1933  BasicBlock *Preheader = CurLoop->getLoopPreheader();
1934
1935  // It is not safe to promote a load/store from the loop if the load/store is
1936  // conditional.  For example, turning:
1937  //
1938  //    for () { if (c) *P += 1; }
1939  //
1940  // into:
1941  //
1942  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
1943  //
1944  // is not safe, because *P may only be valid to access if 'c' is true.
1945  //
1946  // The safety property divides into two parts:
1947  // p1) The memory may not be dereferenceable on entry to the loop.  In this
1948  //    case, we can't insert the required load in the preheader.
1949  // p2) The memory model does not allow us to insert a store along any dynamic
1950  //    path which did not originally have one.
1951  //
1952  // If at least one store is guaranteed to execute, both properties are
1953  // satisfied, and promotion is legal.
1954  //
1955  // This, however, is not a necessary condition. Even if no store/load is
1956  // guaranteed to execute, we can still establish these properties.
1957  // We can establish (p1) by proving that hoisting the load into the preheader
1958  // is safe (i.e. proving dereferenceability on all paths through the loop). We
1959  // can use any access within the alias set to prove dereferenceability,
1960  // since they're all must alias.
1961  //
1962  // There are two ways establish (p2):
1963  // a) Prove the location is thread-local. In this case the memory model
1964  // requirement does not apply, and stores are safe to insert.
1965  // b) Prove a store dominates every exit block. In this case, if an exit
1966  // blocks is reached, the original dynamic path would have taken us through
1967  // the store, so inserting a store into the exit block is safe. Note that this
1968  // is different from the store being guaranteed to execute. For instance,
1969  // if an exception is thrown on the first iteration of the loop, the original
1970  // store is never executed, but the exit blocks are not executed either.
1971
1972  bool DereferenceableInPH = false;
1973  bool StoreIsGuanteedToExecute = false;
1974  bool LoadIsGuaranteedToExecute = false;
1975  bool FoundLoadToPromote = false;
1976
1977  // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
1978  enum {
1979    StoreSafe,
1980    StoreUnsafe,
1981    StoreSafetyUnknown,
1982  } StoreSafety = StoreSafetyUnknown;
1983
  // NOTE(review): the declaration of the LoopUses collection populated below
  // is on a line elided from this view.

1986  // We start with an alignment of one and try to find instructions that allow
1987  // us to prove better alignment.
1988  Align Alignment;
1989  // Keep track of which types of access we see
1990  bool SawUnorderedAtomic = false;
1991  bool SawNotAtomic = false;
1992  AAMDNodes AATags;
1993
1994  const DataLayout &MDL = Preheader->getDataLayout();
1995
1996  // If there are reads outside the promoted set, then promoting stores is
1997  // definitely not safe.
1998  if (HasReadsOutsideSet)
1999    StoreSafety = StoreUnsafe;
2000
2001  if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2002    // If a loop can throw, we have to insert a store along each unwind edge.
2003    // That said, we can't actually make the unwind edge explicit. Therefore,
2004    // we have to prove that the store is dead along the unwind edge.  We do
2005    // this by proving that the caller can't have a reference to the object
2006    // after return and thus can't possibly load from the object.
2007    Value *Object = getUnderlyingObject(SomePtr);
2008    if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2009      StoreSafety = StoreUnsafe;
2010  }
2011
2012  // Check that all accesses to pointers in the alias set use the same type.
2013  // We cannot (yet) promote a memory location that is loaded and stored in
2014  // different sizes.  While we are at it, collect alignment and AA info.
2015  Type *AccessTy = nullptr;
2016  for (Value *ASIV : PointerMustAliases) {
2017    for (Use &U : ASIV->uses()) {
2018      // Ignore instructions that are outside the loop.
2019      Instruction *UI = dyn_cast<Instruction>(U.getUser());
2020      if (!UI || !CurLoop->contains(UI))
2021        continue;
2022
2023      // If there is an non-load/store instruction in the loop, we can't promote
2024      // it.
2025      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        // Ordered (acquire+) loads cannot be promoted at all.
2026        if (!Load->isUnordered())
2027          return false;
2028
2029        SawUnorderedAtomic |= Load->isAtomic();
2030        SawNotAtomic |= !Load->isAtomic();
2031        FoundLoadToPromote = true;
2032
2033        Align InstAlignment = Load->getAlign();
2034
2035        if (!LoadIsGuaranteedToExecute)
2036          LoadIsGuaranteedToExecute =
2037              SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2038
2039        // Note that proving a load safe to speculate requires proving
2040        // sufficient alignment at the target location.  Proving it guaranteed
2041        // to execute does as well.  Thus we can increase our guaranteed
2042        // alignment as well.
2043        if (!DereferenceableInPH || (InstAlignment > Alignment))
          // NOTE(review): the call opening (presumably
          // isSafeToExecuteUnconditionally) is on a line elided from this view.
2045                  *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2046                  Preheader->getTerminator(), AC, AllowSpeculation)) {
2047            DereferenceableInPH = true;
2048            Alignment = std::max(Alignment, InstAlignment);
2049          }
2050      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2051        // Stores *of* the pointer are not interesting, only stores *to* the
2052        // pointer.
2053        if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2054          continue;
2055        if (!Store->isUnordered())
2056          return false;
2057
2058        SawUnorderedAtomic |= Store->isAtomic();
2059        SawNotAtomic |= !Store->isAtomic();
2060
2061        // If the store is guaranteed to execute, both properties are satisfied.
2062        // We may want to check if a store is guaranteed to execute even if we
2063        // already know that promotion is safe, since it may have higher
2064        // alignment than any other guaranteed stores, in which case we can
2065        // raise the alignment on the promoted store.
2066        Align InstAlignment = Store->getAlign();
2067        bool GuaranteedToExecute =
2068            SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2069        StoreIsGuanteedToExecute |= GuaranteedToExecute;
2070        if (GuaranteedToExecute) {
2071          DereferenceableInPH = true;
2072          if (StoreSafety == StoreSafetyUnknown)
2073            StoreSafety = StoreSafe;
2074          Alignment = std::max(Alignment, InstAlignment);
2075        }
2076
2077        // If a store dominates all exit blocks, it is safe to sink.
2078        // As explained above, if an exit block was executed, a dominating
2079        // store must have been executed at least once, so we are not
2080        // introducing stores on paths that did not have them.
2081        // Note that this only looks at explicit exit blocks. If we ever
2082        // start sinking stores into unwind edges (see above), this will break.
2083        if (StoreSafety == StoreSafetyUnknown &&
2084            llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2085              return DT->dominates(Store->getParent(), Exit);
2086            }))
2087          StoreSafety = StoreSafe;
2088
2089        // If the store is not guaranteed to execute, we may still get
2090        // deref info through it.
2091        if (!DereferenceableInPH) {
2092          DereferenceableInPH = isDereferenceableAndAlignedPointer(
2093              Store->getPointerOperand(), Store->getValueOperand()->getType(),
2094              Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2095        }
2096      } else
2097        continue; // Not a load or store.
2098
      // All promoted accesses must agree on the access type.
2099      if (!AccessTy)
2100        AccessTy = getLoadStoreType(UI);
2101      else if (AccessTy != getLoadStoreType(UI))
2102        return false;
2103
2104      // Merge the AA tags.
2105      if (LoopUses.empty()) {
2106        // On the first load/store, just take its AA tags.
2107        AATags = UI->getAAMetadata();
2108      } else if (AATags) {
2109        AATags = AATags.merge(UI->getAAMetadata());
2110      }
2111
2112      LoopUses.push_back(UI);
2113    }
2114  }
2115
2116  // If we found both an unordered atomic instruction and a non-atomic memory
2117  // access, bail.  We can't blindly promote non-atomic to atomic since we
2118  // might not be able to lower the result.  We can't downgrade since that
2119  // would violate memory model.  Also, align 0 is an error for atomics.
2120  if (SawUnorderedAtomic && SawNotAtomic)
2121    return false;
2122
2123  // If we're inserting an atomic load in the preheader, we must be able to
2124  // lower it.  We're only guaranteed to be able to lower naturally aligned
2125  // atomics.
2126  if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2127    return false;
2128
2129  // If we couldn't prove we can hoist the load, bail.
2130  if (!DereferenceableInPH) {
2131    LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2132    return false;
2133  }
2134
2135  // We know we can hoist the load, but don't have a guaranteed store.
2136  // Check whether the location is writable and thread-local. If it is, then we
2137  // can insert stores along paths which originally didn't have them without
2138  // violating the memory model.
2139  if (StoreSafety == StoreSafetyUnknown) {
2140    Value *Object = getUnderlyingObject(SomePtr);
2141    bool ExplicitlyDereferenceableOnly;
2142    if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2143        (!ExplicitlyDereferenceableOnly ||
2144         isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2145        isThreadLocalObject(Object, CurLoop, DT, TTI))
2146      StoreSafety = StoreSafe;
2147  }
2148
2149  // If we've still failed to prove we can sink the store, hoist the load
2150  // only, if possible.
2151  if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2152    // If we cannot hoist the load either, give up.
2153    return false;
2154
2155  // Lets do the promotion!
2156  if (StoreSafety == StoreSafe) {
2157    LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2158                      << '\n');
2159    ++NumLoadStorePromoted;
2160  } else {
2161    LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2162                      << '\n');
2163    ++NumLoadPromoted;
2164  }
2165
2166  ORE->emit([&]() {
2167    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2168                              LoopUses[0])
2169           << "Moving accesses to memory location out of the loop";
2170  });
2171
2172  // Look at all the loop uses, and try to merge their locations.
2173  std::vector<DebugLoc> LoopUsesLocs;
2174  for (auto U : LoopUses)
2175    LoopUsesLocs.push_back(U->getDebugLoc());
2176  auto DL = DebugLoc::getMergedLocations(LoopUsesLocs);
2177
2178  // We use the SSAUpdater interface to insert phi nodes as required.
  // NOTE(review): the declaration of the NewPHIs vector passed to SSAUpdater
  // is on a line elided from this view.
2180  SSAUpdater SSA(&NewPHIs);
2181  LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2182                        MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2183                        SawUnorderedAtomic,
2184                        StoreIsGuanteedToExecute ? AATags : AAMDNodes(),
2185                        *SafetyInfo, StoreSafety == StoreSafe);
2186
2187  // Set up the preheader to have a definition of the value.  It is the live-out
2188  // value from the preheader that uses in the loop will use.
2189  LoadInst *PreheaderLoad = nullptr;
2190  if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
2191    PreheaderLoad =
2192        new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2193                     Preheader->getTerminator()->getIterator());
2194    if (SawUnorderedAtomic)
2195      PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2196    PreheaderLoad->setAlignment(Alignment);
2197    PreheaderLoad->setDebugLoc(DebugLoc::getDropped());
2198    if (AATags && LoadIsGuaranteedToExecute)
2199      PreheaderLoad->setAAMetadata(AATags);
2200
2201    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2202        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2203    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2204    MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2205    SSA.AddAvailableValue(Preheader, PreheaderLoad);
2206  } else {
    // No preheader load needed: a store dominates every use, so seed the
    // updater with poison (the value is never observed before being stored).
2207    SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2208  }
2209
2210  if (VerifyMemorySSA)
2211    MSSAU.getMemorySSA()->verifyMemorySSA();
2212  // Rewrite all the loads in the loop and remember all the definitions from
2213  // stores in the loop.
2214  Promoter.run(LoopUses);
2215
2216  if (VerifyMemorySSA)
2217    MSSAU.getMemorySSA()->verifyMemorySSA();
2218  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2219  if (PreheaderLoad && PreheaderLoad->use_empty())
2220    eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2221
2222  return true;
2223}
2224
2225static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2226 function_ref<void(Instruction *)> Fn) {
2227 for (const BasicBlock *BB : L->blocks())
2228 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2229 for (const auto &Access : *Accesses)
2230 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2231 Fn(MUD->getMemoryInst());
2232}
2233
2234// The bool indicates whether there might be reads outside the set, in which
2235// case only loads may be promoted.
// Collect must-alias sets of loop-invariant pointers that are stored to in
// the loop and have no conflicting writes outside the set; these are the
// candidates for scalar promotion.
// NOTE(review): the function's declaration lines (name, return type and
// parameters MSSA/AA/L) are elided from this view.
2238  BatchAAResults BatchAA(*AA);
2239  AliasSetTracker AST(BatchAA);
2240
  // A load/store is potentially promotable if its address is loop-invariant
  // and not a constant (there is nothing to promote for constant data).
2241  auto IsPotentiallyPromotable = [L](const Instruction *I) {
2242    if (const auto *SI = dyn_cast<StoreInst>(I)) {
2243      const Value *PtrOp = SI->getPointerOperand();
2244      return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2245    }
2246    if (const auto *LI = dyn_cast<LoadInst>(I)) {
2247      const Value *PtrOp = LI->getPointerOperand();
2248      return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2249    }
2250    return false;
2251  };
2252
2253  // Populate AST with potentially promotable accesses.
2254  SmallPtrSet<Value *, 16> AttemptingPromotion;
2255  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2256    if (IsPotentiallyPromotable(I)) {
2257      AttemptingPromotion.insert(I);
2258      AST.add(I);
2259    }
2260  });
2261
2262  // We're only interested in must-alias sets that contain a mod.
  // NOTE(review): the declaration of the Sets container (pairs of alias set
  // and a has-reads-outside flag) is on a line elided from this view.
2264  for (AliasSet &AS : AST)
2265    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2266      Sets.push_back({&AS, false});
2267
2268  if (Sets.empty())
2269    return {}; // Nothing to promote...
2270
2271  // Discard any sets for which there is an aliasing non-promotable access.
2272  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2273    if (AttemptingPromotion.contains(I))
2274      return;
2275
    // NOTE(review): the erase_if over Sets (with the per-set lambda below)
    // opens on a line elided from this view.
2277      ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2278      // Cannot promote if there are writes outside the set.
2279      if (isModSet(MR))
2280        return true;
2281      if (isRefSet(MR)) {
2282        // Remember reads outside the set.
2283        Pair.setInt(true);
2284        // If this is a mod-only set and there are reads outside the set,
2285        // we will not be able to promote, so bail out early.
2286        return !Pair.getPointer()->isRef();
2287      }
2288      return false;
2289    });
2290  });
2291
  // NOTE(review): the declaration of the Result container is on a line elided
  // from this view.
2293  for (auto [Set, HasReadsOutsideSet] : Sets) {
2294    SmallSetVector<Value *, 8> PointerMustAliases;
2295    for (const auto &MemLoc : *Set)
2296      PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
2297    Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2298  }
2299
2300  return Result;
2301}
2302
2303// For a given store instruction or writeonly call instruction, this function
2304// checks that there are no read or writes that conflict with the memory
2305// access in the instruction
// Returns true when it is safe (w.r.t. MemorySSA and alias analysis) to move
// the write out of the loop; false on any potential conflict or when the
// access count exceeds the promotion cap.
// NOTE(review): the function's declaration line (name and first parameters)
// is elided from this view.
2307                                    AAResults *AA, Loop *CurLoop,
2308                                    SinkAndHoistLICMFlags &Flags) {
  // NOTE(review): a line (presumably an assert on I's kind) is elided here.
2310  // If there are more accesses than the Promotion cap, then give up as we're
2311  // not walking a list that long.
2312  if (Flags.tooManyMemoryAccesses())
2313    return false;
2314
2315  auto *IMD = MSSA->getMemoryAccess(I);
2316  BatchAAResults BAA(*AA);
2317  auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, IMD);
2318  // Make sure there are no clobbers inside the loop.
2319  if (!MSSA->isLiveOnEntryDef(Source) && CurLoop->contains(Source->getBlock()))
2320    return false;
2321
2322  // If there are interfering Uses (i.e. their defining access is in the
2323  // loop), or ordered loads (stored as Defs!), don't move this store.
2324  // Could do better here, but this is conservatively correct.
2325  // TODO: Cache set of Uses on the first walk in runOnLoop, update when
2326  // moving accesses. Can also extend to dominating uses.
2327  for (auto *BB : CurLoop->getBlocks()) {
2328    auto *Accesses = MSSA->getBlockAccesses(BB);
2329    if (!Accesses)
2330      continue;
2331    for (const auto &MA : *Accesses)
2332      if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
        // A use whose clobber is inside the loop would observe the moved
        // write in the wrong place.
2333        auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
2334                                             const_cast<MemoryUse *>(MU));
2335        if (!MSSA->isLiveOnEntryDef(MD) && CurLoop->contains(MD->getBlock()))
2336          return false;
2337        // Disable hoisting past potentially interfering loads. Optimized
2338        // Uses may point to an access outside the loop, as getClobbering
2339        // checks the previous iteration when walking the backedge.
2340        // FIXME: More precise: no Uses that alias I.
2341        if (!Flags.getIsSink() && !MSSA->dominates(IMD, MU))
2342          return false;
2343      } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
        // Ordered loads are modeled as Defs in MemorySSA; they block motion.
2344        if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
2345          (void)LI; // Silence warning.
2346          assert(!LI->isUnordered() && "Expected unordered load");
2347          return false;
2348        }
2349        // Any call, while it may not be clobbering I, it may be a use.
2350        if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
2351          // Check if the call may read from the memory location written
2352          // to by I. Check CI's attributes and arguments; the number of
2353          // such checks performed is limited above by NoOfMemAccTooLarge.
2354          if (auto *SI = dyn_cast<StoreInst>(I)) {
            // NOTE(review): the mod/ref query against the store's memory
            // location (defining MRI) is on a line elided from this view.
2356            if (isModOrRefSet(MRI))
2357              return false;
2358          } else {
2359            auto *SCI = cast<CallInst>(I);
2360            // If the instruction we are wanting to hoist is also a call
2361            // instruction then we need not check mod/ref info with itself
2362            if (SCI == CI)
2363              continue;
2364            ModRefInfo MRI = BAA.getModRefInfo(CI, SCI);
2365            if (isModOrRefSet(MRI))
2366              return false;
2367          }
2368        }
2369      }
2370  }
2371  return true;
2372}
2373
// Returns true when the memory read by MemoryUse \p MU may be invalidated
// (clobbered) within loop \p CurLoop, i.e. when moving the access is unsafe.
// NOTE(review): the function's declaration line (name and the MSSA/MU
// parameters) is elided from this view.
2375                                     Loop *CurLoop, Instruction &I,
2376                                     SinkAndHoistLICMFlags &Flags,
2377                                     bool InvariantGroup) {
2378  // For hoisting, use the walker to determine safety
2379  if (!Flags.getIsSink()) {
2380    // If hoisting an invariant group, we only need to check that there
2381    // is no store to the loaded pointer between the start of the loop,
2382    // and the load (since all values must be the same).
2383
2384    // This can be checked in two conditions:
2385    // 1) if the memoryaccess is outside the loop
2386    // 2) the earliest access is at the loop header,
2387    // if the memory loaded is the phi node
2388
2389    BatchAAResults BAA(MSSA->getAA());
2390    MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
    // Invalidated iff the clobber is inside the loop, except that for
    // invariant groups a MemoryPhi clobber at the header is tolerated.
2391    return !MSSA->isLiveOnEntryDef(Source) &&
2392           CurLoop->contains(Source->getBlock()) &&
2393           !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2394  }
2395
2396  // For sinking, we'd need to check all Defs below this use. The getClobbering
2397  // call will look on the backedge of the loop, but will check aliasing with
2398  // the instructions on the previous iteration.
2399  // For example:
2400  // for (i ... )
2401  //   load a[i] ( Use (LoE)
2402  //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2403  //   i++;
2404  // The load sees no clobbering inside the loop, as the backedge alias check
2405  // does phi translation, and will check aliasing against store a[i-1].
2406  // However sinking the load outside the loop, below the store is incorrect.
2407
2408  // For now, only sink if there are no Defs in the loop, and the existing ones
2409  // precede the use and are in the same block.
2410  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2411  // needs PostDominatorTreeAnalysis.
2412  // FIXME: More precise: no Defs that alias this Use.
2413  if (Flags.tooManyMemoryAccesses())
2414    return true;
2415  for (auto *BB : CurLoop->getBlocks())
2416    if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2417      return true;
2418  // When sinking, the source block may not be part of the loop so check it.
2419  if (!CurLoop->contains(&I))
2420    return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2421
2422  return false;
2423}
2424
  // (Function signature elided from this view.) Returns true if any MemoryDef
  // in BB could invalidate MU: a Def in another block, or one in the same
  // block that does not locally dominate (i.e. precede) the use.
2426  if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2427    for (const auto &MA : *Accesses)
2428      if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2429        if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2430          return true;
2431  return false;
2432}
2433
2434/// Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A <
2435/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
2436/// minimum can be computed outside of loop, and A is not a loop-invariant.
2437static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2438                        MSSAUpdater &MSSAU) {
  // For a logical OR, de Morgan: we look for (A > INV_1 OR A > INV_2) style
  // patterns and invert the predicates below.
2439  bool Inverse = false;
2440  using namespace PatternMatch;
2441  Value *Cond1, *Cond2;
2442  if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2443    Inverse = true;
2444  } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2445    // Do nothing
2446  } else
2447    return false;
2448
  // Match one side as `icmp P LHS, RHS` with a variant LHS and invariant RHS,
  // canonicalizing operand order and (for OR) inverting the predicate.
2449  auto MatchICmpAgainstInvariant = [&](Value *C, CmpPredicate &P, Value *&LHS,
2450                                       Value *&RHS) {
2451    if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2452      return false;
2453    if (!LHS->getType()->isIntegerTy())
2454      return false;
    // NOTE(review): a guard on the predicate kind (relational-only) appears
    // to be on a line elided from this view.
2456      return false;
2457    if (L.isLoopInvariant(LHS)) {
2458      std::swap(LHS, RHS);
      // NOTE(review): the predicate swap accompanying the operand swap is on
      // a line elided from this view.
2460    }
2461    if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2462      return false;
2463    if (Inverse)
      // NOTE(review): the predicate inversion is on a line elided from this
      // view.
2465    return true;
2466  };
2467  CmpPredicate P1, P2;
2468  Value *LHS1, *LHS2, *RHS1, *RHS2;
2469  if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2470      !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2471    return false;
  // Both compares must use the same predicate and the same variant LHS.
2472  auto MatchingPred = CmpPredicate::getMatching(P1, P2);
2473  if (!MatchingPred || LHS1 != LHS2)
2474    return false;
2475
2476  // Everything is fine, we can do the transform.
2477  bool UseMin = ICmpInst::isLT(*MatchingPred) || ICmpInst::isLE(*MatchingPred);
2478  assert(
2479      (UseMin || ICmpInst::isGT(*MatchingPred) ||
2480       ICmpInst::isGE(*MatchingPred)) &&
2481      "Relational predicate is either less (or equal) or greater (or equal)!");
2482  Intrinsic::ID id = ICmpInst::isSigned(*MatchingPred)
2483                         ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2484                         : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2485  auto *Preheader = L.getLoopPreheader();
2486  assert(Preheader && "Loop is not in simplify form?");
2487  IRBuilder<> Builder(Preheader->getTerminator());
2488  // We are about to create a new guaranteed use for RHS2 which might not exist
2489  // before (if it was a non-taken input of logical and/or instruction). If it
2490  // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
2491  // introduced, so they don't need this.
2492  if (isa<SelectInst>(I))
2493    RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
  // The invariant min/max is computed once in the preheader.
2494  Value *NewRHS = Builder.CreateBinaryIntrinsic(
2495      id, RHS1, RHS2, nullptr,
2496      StringRef("invariant.") +
2497          (ICmpInst::isSigned(*MatchingPred) ? "s" : "u") +
2498          (UseMin ? "min" : "max"));
2499  Builder.SetInsertPoint(&I);
2500  ICmpInst::Predicate P = *MatchingPred;
2501  if (Inverse)
    // NOTE(review): the predicate inversion for the OR case is on a line
    // elided from this view.
2503  Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2504  NewCond->takeName(&I);
2505  I.replaceAllUsesWith(NewCond);
2506  eraseInstruction(I, SafetyInfo, MSSAU);
  // The original compares are now dead; salvage their debug info first.
2507  Instruction &CondI1 = *cast<Instruction>(Cond1);
2508  Instruction &CondI2 = *cast<Instruction>(Cond2);
2509  salvageDebugInfo(CondI1);
2510  salvageDebugInfo(CondI2);
2511  eraseInstruction(CondI1, SafetyInfo, MSSAU);
2512  eraseInstruction(CondI2, SafetyInfo, MSSAU);
2513  return true;
2514}
2515
2516/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2517/// this allows hoisting the inner GEP.
2518static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                     // NOTE(review): intermediate parameters (MSSAU, AC,
                     // judging by the uses below) are on a line elided from
                     // this view.
2520                     DominatorTree *DT) {
  // NOTE(review): the dyn_cast of I to GetElementPtrInst (defining `GEP`) is
  // on a line elided from this view.
2522  if (!GEP)
2523    return false;
2524
2525  // Do not try to hoist a constant GEP out of the loop via reassociation.
2526  // Constant GEPs can often be folded into addressing modes, and reassociating
2527  // them may inhibit CSE of a common base.
2528  if (GEP->hasAllConstantIndices())
2529    return false;
2530
  // The inner GEP must be loop-resident, single-use, and itself a GEP.
2531  auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2532  if (!Src || !Src->hasOneUse() || !L.contains(Src))
2533    return false;
2534
  // Only the base pointer and the OUTER indices need to be invariant for the
  // swap to enable hoisting.
2535  Value *SrcPtr = Src->getPointerOperand();
2536  auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2537  if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2538    return false;
2539
2540  // This can only happen if !AllowSpeculation, otherwise this would already be
2541  // handled.
2542  // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2543  // The flag exists to prevent metadata dropping, which is not relevant here.
2544  if (all_of(Src->indices(), LoopInvariant))
2545    return false;
2546
2547  // The swapped GEPs are inbounds if both original GEPs are inbounds
2548  // and the sign of the offsets is the same. For simplicity, only
2549  // handle both offsets being non-negative.
2550  const DataLayout &DL = GEP->getDataLayout();
2551  auto NonNegative = [&](Value *V) {
2552    return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2553  };
2554  bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2555                    all_of(Src->indices(), NonNegative) &&
2556                    all_of(GEP->indices(), NonNegative);
2557
  // Build the invariant (outer-index) GEP in the preheader and the variant
  // (inner-index) GEP at the original position, then swap users over.
2558  BasicBlock *Preheader = L.getLoopPreheader();
2559  IRBuilder<> Builder(Preheader->getTerminator());
2560  Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2561                                    SmallVector<Value *>(GEP->indices()),
2562                                    "invariant.gep", IsInBounds);
2563  Builder.SetInsertPoint(GEP);
2564  Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2565                                    SmallVector<Value *>(Src->indices()), "gep",
2566                                    IsInBounds);
2567  GEP->replaceAllUsesWith(NewGEP);
2568  eraseInstruction(*GEP, SafetyInfo, MSSAU);
2569  salvageDebugInfo(*Src);
2570  eraseInstruction(*Src, SafetyInfo, MSSAU);
2571  return true;
2572}
2573
2574/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2575/// C1 and C2 are loop invariants and LV is a loop-variant.
2576static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2577 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2578 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2579 AssumptionCache *AC, DominatorTree *DT) {
2580 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2581 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2582
2583 bool IsSigned = ICmpInst::isSigned(Pred);
2584
2585 // Try to represent VariantLHS as sum of invariant and variant operands.
2586 using namespace PatternMatch;
2587 Value *VariantOp, *InvariantOp;
2588 if (IsSigned && !match(VariantLHS, m_NSWAddLike(m_Value(VariantOp),
2589 m_Value(InvariantOp))))
2590 return false;
2591 if (!IsSigned && !match(VariantLHS, m_NUWAddLike(m_Value(VariantOp),
2592 m_Value(InvariantOp))))
2593 return false;
2594
2595 // LHS itself is a loop-variant, try to represent it in the form:
2596 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2597 if (L.isLoopInvariant(VariantOp))
2598 std::swap(VariantOp, InvariantOp);
2599 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2600 return false;
2601
2602 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2603 // freely move values from left side of inequality to right side (just as in
2604 // normal linear arithmetics). Overflows make things much more complicated, so
2605 // we want to avoid this.
2606 auto &DL = L.getHeader()->getDataLayout();
2607 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2608 if (IsSigned && computeOverflowForSignedSub(InvariantRHS, InvariantOp, SQ) !=
2610 return false;
2611 if (!IsSigned &&
2612 computeOverflowForUnsignedSub(InvariantRHS, InvariantOp, SQ) !=
2614 return false;
2615 auto *Preheader = L.getLoopPreheader();
2616 assert(Preheader && "Loop is not in simplify form?");
2617 IRBuilder<> Builder(Preheader->getTerminator());
2618 Value *NewCmpOp =
2619 Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
2620 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2621 ICmp.setPredicate(Pred);
2622 ICmp.setOperand(0, VariantOp);
2623 ICmp.setOperand(1, NewCmpOp);
2624
2625 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2626 salvageDebugInfo(DeadI);
2627 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2628 return true;
2629}
2630
2631/// Try to reassociate and hoist the following two patterns:
2632/// LV - C1 < C2 --> LV < C1 + C2,
2633/// C1 - LV < C2 --> LV > C1 - C2.
2634static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2635 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2636 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2637 AssumptionCache *AC, DominatorTree *DT) {
2638 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2639 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2640
2641 bool IsSigned = ICmpInst::isSigned(Pred);
2642
2643 // Try to represent VariantLHS as sum of invariant and variant operands.
2644 using namespace PatternMatch;
2645 Value *VariantOp, *InvariantOp;
2646 if (IsSigned &&
2647 !match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2648 return false;
2649 if (!IsSigned &&
2650 !match(VariantLHS, m_NUWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2651 return false;
2652
2653 bool VariantSubtracted = false;
2654 // LHS itself is a loop-variant, try to represent it in the form:
2655 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2656 // the variant operand goes with minus, we use a slightly different scheme.
2657 if (L.isLoopInvariant(VariantOp)) {
2658 std::swap(VariantOp, InvariantOp);
2659 VariantSubtracted = true;
2660 Pred = ICmpInst::getSwappedPredicate(Pred);
2661 }
2662 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2663 return false;
2664
2665 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2666 // freely move values from left side of inequality to right side (just as in
2667 // normal linear arithmetics). Overflows make things much more complicated, so
2668 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2669 // "C1 - C2" does not overflow.
2670 auto &DL = L.getHeader()->getDataLayout();
2671 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2672 if (VariantSubtracted && IsSigned) {
2673 // C1 - LV < C2 --> LV > C1 - C2
2674 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
2676 return false;
2677 } else if (VariantSubtracted && !IsSigned) {
2678 // C1 - LV < C2 --> LV > C1 - C2
2679 if (computeOverflowForUnsignedSub(InvariantOp, InvariantRHS, SQ) !=
2681 return false;
2682 } else if (!VariantSubtracted && IsSigned) {
2683 // LV - C1 < C2 --> LV < C1 + C2
2684 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
2686 return false;
2687 } else { // !VariantSubtracted && !IsSigned
2688 // LV - C1 < C2 --> LV < C1 + C2
2689 if (computeOverflowForUnsignedAdd(InvariantOp, InvariantRHS, SQ) !=
2691 return false;
2692 }
2693 auto *Preheader = L.getLoopPreheader();
2694 assert(Preheader && "Loop is not in simplify form?");
2695 IRBuilder<> Builder(Preheader->getTerminator());
2696 Value *NewCmpOp =
2697 VariantSubtracted
2698 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
2699 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned)
2700 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
2701 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2702 ICmp.setPredicate(Pred);
2703 ICmp.setOperand(0, VariantOp);
2704 ICmp.setOperand(1, NewCmpOp);
2705
2706 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2707 salvageDebugInfo(DeadI);
2708 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2709 return true;
2710}
2711
2712/// Reassociate and hoist add/sub expressions.
2713static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2715 DominatorTree *DT) {
2716 using namespace PatternMatch;
2717 CmpPredicate Pred;
2718 Value *LHS, *RHS;
2719 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
2720 return false;
2721
2722 // Put variant operand to LHS position.
2723 if (L.isLoopInvariant(LHS)) {
2724 std::swap(LHS, RHS);
2725 Pred = ICmpInst::getSwappedPredicate(Pred);
2726 }
2727 // We want to delete the initial operation after reassociation, so only do it
2728 // if it has no other uses.
2729 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
2730 return false;
2731
2732 // TODO: We could go with smarter context, taking common dominator of all I's
2733 // users instead of I itself.
2734 if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2735 return true;
2736
2737 if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2738 return true;
2739
2740 return false;
2741}
2742
2743static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2744 unsigned FPOpcode) {
2745 if (I->getOpcode() == IntOpcode)
2746 return true;
2747 if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2748 I->hasNoSignedZeros())
2749 return true;
2750 return false;
2751}
2752
2753/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2754/// A1, A2, ... and C are loop invariants into expressions like
2755/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2756/// invariant expressions. This functions returns true only if any hoisting has
2757/// actually occurred.
2759 ICFLoopSafetyInfo &SafetyInfo,
2761 DominatorTree *DT) {
2762 if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
2763 return false;
2764 Value *VariantOp = I.getOperand(0);
2765 Value *InvariantOp = I.getOperand(1);
2766 if (L.isLoopInvariant(VariantOp))
2767 std::swap(VariantOp, InvariantOp);
2768 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2769 return false;
2770 Value *Factor = InvariantOp;
2771
2772 // First, we need to make sure we should do the transformation.
2773 SmallVector<Use *> Changes;
2776 if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2777 Worklist.push_back(VariantBinOp);
2778 while (!Worklist.empty()) {
2779 BinaryOperator *BO = Worklist.pop_back_val();
2780 if (!BO->hasOneUse())
2781 return false;
2782 if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
2785 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
2786 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
2787 Adds.push_back(BO);
2788 continue;
2789 }
2790 if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
2791 L.isLoopInvariant(BO))
2792 return false;
2793 Use &U0 = BO->getOperandUse(0);
2794 Use &U1 = BO->getOperandUse(1);
2795 if (L.isLoopInvariant(U0))
2796 Changes.push_back(&U0);
2797 else if (L.isLoopInvariant(U1))
2798 Changes.push_back(&U1);
2799 else
2800 return false;
2801 unsigned Limit = I.getType()->isIntOrIntVectorTy()
2804 if (Changes.size() > Limit)
2805 return false;
2806 }
2807 if (Changes.empty())
2808 return false;
2809
2810 // Drop the poison flags for any adds we looked through.
2811 if (I.getType()->isIntOrIntVectorTy()) {
2812 for (auto *Add : Adds)
2813 Add->dropPoisonGeneratingFlags();
2814 }
2815
2816 // We know we should do it so let's do the transformation.
2817 auto *Preheader = L.getLoopPreheader();
2818 assert(Preheader && "Loop is not in simplify form?");
2819 IRBuilder<> Builder(Preheader->getTerminator());
2820 for (auto *U : Changes) {
2821 assert(L.isLoopInvariant(U->get()));
2822 auto *Ins = cast<BinaryOperator>(U->getUser());
2823 Value *Mul;
2824 if (I.getType()->isIntOrIntVectorTy()) {
2825 Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
2826 // Drop the poison flags on the original multiply.
2827 Ins->dropPoisonGeneratingFlags();
2828 } else
2829 Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
2830
2831 // Rewrite the reassociable instruction.
2832 unsigned OpIdx = U->getOperandNo();
2833 auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
2834 auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
2835 auto *NewBO =
2836 BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
2837 Ins->getName() + ".reass", Ins->getIterator());
2838 NewBO->setDebugLoc(DebugLoc::getDropped());
2839 NewBO->copyIRFlags(Ins);
2840 if (VariantOp == Ins)
2841 VariantOp = NewBO;
2842 Ins->replaceAllUsesWith(NewBO);
2843 eraseInstruction(*Ins, SafetyInfo, MSSAU);
2844 }
2845
2846 I.replaceAllUsesWith(VariantOp);
2847 eraseInstruction(I, SafetyInfo, MSSAU);
2848 return true;
2849}
2850
2851/// Reassociate associative binary expressions of the form
2852///
2853/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
2854/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
2855/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
2856/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
2857///
2858/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
2859/// loop invariants that we want to hoist, noting that associativity implies
2860/// commutativity.
2862 ICFLoopSafetyInfo &SafetyInfo,
2864 DominatorTree *DT) {
2865 auto *BO = dyn_cast<BinaryOperator>(&I);
2866 if (!BO || !BO->isAssociative())
2867 return false;
2868
2869 Instruction::BinaryOps Opcode = BO->getOpcode();
2870 bool LVInRHS = L.isLoopInvariant(BO->getOperand(0));
2871 auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(LVInRHS));
2872 if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
2873 BO0->hasNUsesOrMore(BO0->getType()->isIntegerTy() ? 2 : 3))
2874 return false;
2875
2876 Value *LV = BO0->getOperand(0);
2877 Value *C1 = BO0->getOperand(1);
2878 Value *C2 = BO->getOperand(!LVInRHS);
2879
2880 assert(BO->isCommutative() && BO0->isCommutative() &&
2881 "Associativity implies commutativity");
2882 if (L.isLoopInvariant(LV) && !L.isLoopInvariant(C1))
2883 std::swap(LV, C1);
2884 if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
2885 return false;
2886
2887 auto *Preheader = L.getLoopPreheader();
2888 assert(Preheader && "Loop is not in simplify form?");
2889
2890 IRBuilder<> Builder(Preheader->getTerminator());
2891 auto *Inv = Builder.CreateBinOp(Opcode, C1, C2, "invariant.op");
2892
2893 auto *NewBO = BinaryOperator::Create(
2894 Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());
2895 NewBO->setDebugLoc(DebugLoc::getDropped());
2896
2897 if (Opcode == Instruction::FAdd || Opcode == Instruction::FMul) {
2898 // Intersect FMF flags for FADD and FMUL.
2899 FastMathFlags Intersect = BO->getFastMathFlags() & BO0->getFastMathFlags();
2900 if (auto *I = dyn_cast<Instruction>(Inv))
2901 I->setFastMathFlags(Intersect);
2902 NewBO->setFastMathFlags(Intersect);
2903 } else {
2904 OverflowTracking Flags;
2905 Flags.AllKnownNonNegative = false;
2906 Flags.AllKnownNonZero = false;
2907 Flags.mergeFlags(*BO);
2908 Flags.mergeFlags(*BO0);
2909 // If `Inv` was not constant-folded, a new Instruction has been created.
2910 if (auto *I = dyn_cast<Instruction>(Inv))
2911 Flags.applyFlags(*I);
2912 Flags.applyFlags(*NewBO);
2913 }
2914
2915 BO->replaceAllUsesWith(NewBO);
2916 eraseInstruction(*BO, SafetyInfo, MSSAU);
2917
2918 // (LV op C1) might not be erased if it has more uses than the one we just
2919 // replaced.
2920 if (BO0->use_empty()) {
2921 salvageDebugInfo(*BO0);
2922 eraseInstruction(*BO0, SafetyInfo, MSSAU);
2923 }
2924
2925 return true;
2926}
2927
2929 ICFLoopSafetyInfo &SafetyInfo,
2931 DominatorTree *DT) {
2932 // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
2933 // into (x < min(INV1, INV2)), and hoisting the invariant part of this
2934 // expression out of the loop.
2935 if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
2936 ++NumHoisted;
2937 ++NumMinMaxHoisted;
2938 return true;
2939 }
2940
2941 // Try to hoist GEPs by reassociation.
2942 if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
2943 ++NumHoisted;
2944 ++NumGEPsHoisted;
2945 return true;
2946 }
2947
2948 // Try to hoist add/sub's by reassociation.
2949 if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
2950 ++NumHoisted;
2951 ++NumAddSubHoisted;
2952 return true;
2953 }
2954
2955 bool IsInt = I.getType()->isIntOrIntVectorTy();
2956 if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2957 ++NumHoisted;
2958 if (IsInt)
2959 ++NumIntAssociationsHoisted;
2960 else
2961 ++NumFPAssociationsHoisted;
2962 return true;
2963 }
2964
2965 if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2966 ++NumHoisted;
2967 ++NumBOAssociationsHoisted;
2968 return true;
2969 }
2970
2971 return false;
2972}
2973
2974/// Little predicate that returns true if the specified basic block is in
2975/// a subloop of the current one, not the current one itself.
2976///
2977static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2978 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2979 return LI->getLoopFor(BB) != CurLoop;
2980}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
DXIL Resource Access
early cse Early CSE w MemorySSA
#define DEBUG_TYPE
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool isReassociableOp(Instruction *I, unsigned IntOpcode, unsigned FPOpcode)
Definition LICM.cpp:2743
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, TargetTransformInfo *TTI, bool &FoldableInLoop, bool LoopNestMode)
Return true if the only users of this instruction are outside of the loop.
Definition LICM.cpp:1329
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if this allows hoisting the inner ...
Definition LICM.cpp:2518
static cl::opt< bool > SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false), cl::desc("Force thread model single in LICM pass"))
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, LoopInfo *LI, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU)
Definition LICM.cpp:1503
static cl::opt< unsigned > FPAssociationUpperLimit("licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop, const TargetTransformInfo *TTI)
Return true if the instruction is foldable in the loop.
Definition LICM.cpp:1299
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A < min(INV_1, INV_2)),...
Definition LICM.cpp:2437
static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE)
Definition LICM.cpp:1455
static Instruction * cloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1371
static cl::opt< bool > ControlFlowHoisting("licm-control-flow-hoisting", cl::Hidden, cl::init(false), cl::desc("Enable control flow (and PHI) hoisting in LICM"))
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, Loop *CurLoop, Instruction &I, SinkAndHoistLICMFlags &Flags, bool InvariantGroup)
Definition LICM.cpp:2374
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to turn things like "LV + C1 < C2" into "LV < C2 - C1".
Definition LICM.cpp:2576
static MemoryAccess * getClobberingMemoryAccess(MemorySSA &MSSA, BatchAAResults &BAA, SinkAndHoistLICMFlags &Flags, MemoryUseOrDef *MA)
Definition LICM.cpp:1150
static SmallVector< PointersAndHasReadsOutsideSet, 0 > collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)
Definition LICM.cpp:2237
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
When an instruction is found to only use loop invariant operands that is safe to hoist,...
Definition LICM.cpp:1681
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition LICM.cpp:1485
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition LICM.cpp:1575
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate and hoist add/sub expressions.
Definition LICM.cpp:2713
static bool hoistMulAddAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where A1, A2,...
Definition LICM.cpp:2758
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
static bool isOnlyMemoryAccess(const Instruction *I, const Loop *L, const MemorySSAUpdater &MSSAU)
Return true if I is the only Instruction with a MemoryAccess in L.
Definition LICM.cpp:1134
static cl::opt< unsigned > IntAssociationUpperLimit("licm-max-num-int-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition LICM.cpp:2225
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition LICM.cpp:1065
static bool isHoistableAndSinkableInst(Instruction &I)
Return true if-and-only-if we know how to (mechanically) both hoist and sink a given instruction out ...
Definition LICM.cpp:1122
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1470
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition LICM.cpp:2977
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate and hoist the following two patterns: LV - C1 < C2 --> LV < C1 + C2,...
Definition LICM.cpp:2634
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1448
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition LICM.cpp:1728
static bool hoistArithmetics(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Aggregates various functions for hoisting computations out of loop.
Definition LICM.cpp:2928
static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA, AAResults *AA, Loop *CurLoop, SinkAndHoistLICMFlags &Flags)
Definition LICM.cpp:2306
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition LICM.cpp:1290
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition LICM.cpp:220
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool hoistBOAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate associative binary expressions of the form.
Definition LICM.cpp:2861
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition LICM.cpp:2425
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Memory SSA
Definition MemorySSA.cpp:72
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
if(PassOpts->AAPipeline)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file provides a priority worklist.
static DominatorTree getDomTree(Function &F)
Remove Loads Into Fake Uses
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
BinaryOperator * Mul
LLVM_ABI void add(const MemoryLocation &Loc)
These methods are used to add different types of instructions to the alias sets.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition BasicBlock.h:397
LLVM_ABI bool canSplitPredecessors() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
Conditional Branch instruction.
static CondBrInst * Create(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse, InsertPosition InsertBefore=nullptr)
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isNegative() const
Definition Constants.h:214
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition DataLayout.h:572
static LLVM_ABI DebugLoc getMergedLocations(ArrayRef< DebugLoc > Locs)
Try to combine the vector of locations passed as input in a single one.
Definition DebugLoc.cpp:166
static DebugLoc getDropped()
Definition DebugLoc.h:163
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop: checks loop body & header for the possibility of may throw exc...
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2787
LLVM_ABI void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition LICM.cpp:325
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition LICM.cpp:303
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition LICM.cpp:335
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition LICM.cpp:365
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition SSAUpdater.h:147
An instruction for reading from memory.
void setAlignment(Align Align)
Value * getPointerOperand()
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
LLVM_ABI bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition LoopInfo.cpp:963
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition MustExecute.h:60
LLVM_ABI void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
LLVM_ABI const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:72
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:66
BasicBlock * getBlock() const
Definition MemorySSA.h:162
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:252
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:246
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:249
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
An analysis that produces MemorySSA for a function.
Definition MemorySSA.h:922
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
LLVM_ABI void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
LLVM_ABI void insertUse(MemoryUse *Use, bool RenameUses=false)
LLVM_ABI MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
LLVM_ABI MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
LLVM_ABI void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition MemorySSA.h:1039
Legacy analysis pass which computes MemorySSA.
Definition MemorySSA.h:979
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition MemorySSA.h:702
AliasAnalysis & getAA()
Definition MemorySSA.h:800
DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition MemorySSA.h:765
LLVM_ABI MemorySSAWalker * getSkipSelfWalker()
AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition MemorySSA.h:758
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition MemorySSA.h:720
LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition MemorySSA.h:740
Class that has the common methods + fields of memory uses/defs.
Definition MemorySSA.h:250
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition MemorySSA.h:260
Represents read-only accesses to memory.
Definition MemorySSA.h:310
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
PointerIntPair - This class implements a pair of a pointer and small integer.
void setInt(IntType IntVal) &
PointerTy getPointer() const
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB)
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition SSAUpdater.h:39
The main scalar evolution driver.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition SetVector.h:181
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:106
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
Flags controlling how much is checked when sinking or hoisting instructions.
Definition LoopUtils.h:123
LLVM_ABI SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition LICM.cpp:393
unsigned LicmMssaNoAccForPromotionCap
Definition LoopUtils.h:142
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
static unsigned getPointerOperandIndex()
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static UncondBrInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
const Use & getOperandUse(unsigned i) const
Definition User.h:220
void setOperand(unsigned i, Value *Val)
Definition User.h:212
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI std::string getNameOrAsOperand() const
Definition Value.cpp:464
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:440
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:427
bool use_empty() const
Definition Value.h:347
iterator_range< use_iterator > uses()
Definition Value.h:381
user_iterator_impl< User > user_iterator
Definition Value.h:392
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
Abstract Attribute helper functions.
Definition Attributor.h:165
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
@ NeverOverflows
Never overflows.
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition Metadata.cpp:64
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if is legal to hoist or sink this instruction disregarding the possible introduction of ...
Definition LICM.cpp:1164
auto pred_end(const MachineBasicBlock *BB)
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1726
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition Loads.cpp:229
constexpr from_range_t from_range
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
auto cast_or_null(const Y &Val)
Definition Casting.h:714
auto pred_size(const MachineBasicBlock *BB)
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:336
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI Pass * createLICMPass()
Definition LICM.cpp:386
LLVM_ABI SmallVector< BasicBlock *, 16 > collectChildrenInLoop(DominatorTree *DT, DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
DomTreeNodeBase< BasicBlock > DomTreeNode
Definition Dominators.h:94
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
LLVM_ABI bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition LICM.cpp:888
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI void initializeLegacyLICMPassPass(PassRegistry &)
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_TEMPLATE_ABI void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
bool isModOrRefSet(const ModRefInfo MRI)
Definition ModRef.h:43
LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
TargetTransformInfo TTI
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition MemorySSA.cpp:84
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
@ Add
Sum of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition Loads.cpp:249
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
TinyPtrVector< BasicBlock * > ColorVector
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2192
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition LICM.cpp:559
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
bool capturesNothing(CaptureComponents CC)
Definition ModRef.h:355
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< BasicBlock::iterator > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition LICM.cpp:1910
cl::opt< unsigned > SetLicmMssaOptCap
LLVM_ABI bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition LICM.cpp:626
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
LLVM_ABI AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70