LLVM 18.0.0git
LoopRotationUtils.cpp
Go to the documentation of this file.
//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//
12
#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
38using namespace llvm;
39
40#define DEBUG_TYPE "loop-rotate"
41
42STATISTIC(NumNotRotatedDueToHeaderSize,
43 "Number of loops not rotated due to the header size");
44STATISTIC(NumInstrsHoisted,
45 "Number of instructions hoisted into loop preheader");
46STATISTIC(NumInstrsDuplicated,
47 "Number of instructions cloned into loop preheader");
48STATISTIC(NumRotated, "Number of loops rotated");
49
50static cl::opt<bool>
51 MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden,
52 cl::desc("Allow loop rotation multiple times in order to reach "
53 "a better latch exit"));
54
55// Probability that a rotated loop has zero trip count / is never entered.
56static constexpr uint32_t ZeroTripCountWeights[] = {1, 127};
57
58namespace {
59/// A simple loop rotation transformation.
60class LoopRotate {
61 const unsigned MaxHeaderSize;
62 LoopInfo *LI;
65 DominatorTree *DT;
67 MemorySSAUpdater *MSSAU;
68 const SimplifyQuery &SQ;
69 bool RotationOnly;
70 bool IsUtilMode;
71 bool PrepareForLTO;
72
73public:
74 LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
77 const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
78 bool PrepareForLTO)
79 : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
80 MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
81 IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO) {}
82 bool processLoop(Loop *L);
83
84private:
85 bool rotateLoop(Loop *L, bool SimplifiedLatch);
86 bool simplifyLoopLatch(Loop *L);
87};
88} // end anonymous namespace
89
90/// Insert (K, V) pair into the ValueToValueMap, and verify the key did not
91/// previously exist in the map, and the value was inserted.
93 bool Inserted = VM.insert({K, V}).second;
94 assert(Inserted);
95 (void)Inserted;
96}
97/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
98/// old header into the preheader. If there were uses of the values produced by
99/// these instruction that were outside of the loop, we have to insert PHI nodes
100/// to merge the two values. Do this now.
102 BasicBlock *OrigPreheader,
104 ScalarEvolution *SE,
105 SmallVectorImpl<PHINode*> *InsertedPHIs) {
106 // Remove PHI node entries that are no longer live.
107 BasicBlock::iterator I, E = OrigHeader->end();
108 for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
109 PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));
110
111 // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
112 // as necessary.
113 SSAUpdater SSA(InsertedPHIs);
114 for (I = OrigHeader->begin(); I != E; ++I) {
115 Value *OrigHeaderVal = &*I;
116
117 // If there are no uses of the value (e.g. because it returns void), there
118 // is nothing to rewrite.
119 if (OrigHeaderVal->use_empty())
120 continue;
121
122 Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);
123
124 // The value now exits in two versions: the initial value in the preheader
125 // and the loop "next" value in the original header.
126 SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
127 // Force re-computation of OrigHeaderVal, as some users now need to use the
128 // new PHI node.
129 if (SE)
130 SE->forgetValue(OrigHeaderVal);
131 SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
132 SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);
133
134 // Visit each use of the OrigHeader instruction.
135 for (Use &U : llvm::make_early_inc_range(OrigHeaderVal->uses())) {
136 // SSAUpdater can't handle a non-PHI use in the same block as an
137 // earlier def. We can easily handle those cases manually.
138 Instruction *UserInst = cast<Instruction>(U.getUser());
139 if (!isa<PHINode>(UserInst)) {
140 BasicBlock *UserBB = UserInst->getParent();
141
142 // The original users in the OrigHeader are already using the
143 // original definitions.
144 if (UserBB == OrigHeader)
145 continue;
146
147 // Users in the OrigPreHeader need to use the value to which the
148 // original definitions are mapped.
149 if (UserBB == OrigPreheader) {
150 U = OrigPreHeaderVal;
151 continue;
152 }
153 }
154
155 // Anything else can be handled by SSAUpdater.
156 SSA.RewriteUse(U);
157 }
158
159 // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
160 // intrinsics.
162 llvm::findDbgValues(DbgValues, OrigHeaderVal);
163 for (auto &DbgValue : DbgValues) {
164 // The original users in the OrigHeader are already using the original
165 // definitions.
166 BasicBlock *UserBB = DbgValue->getParent();
167 if (UserBB == OrigHeader)
168 continue;
169
170 // Users in the OrigPreHeader need to use the value to which the
171 // original definitions are mapped and anything else can be handled by
172 // the SSAUpdater. To avoid adding PHINodes, check if the value is
173 // available in UserBB, if not substitute undef.
174 Value *NewVal;
175 if (UserBB == OrigPreheader)
176 NewVal = OrigPreHeaderVal;
177 else if (SSA.HasValueForBlock(UserBB))
178 NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
179 else
180 NewVal = UndefValue::get(OrigHeaderVal->getType());
181 DbgValue->replaceVariableLocationOp(OrigHeaderVal, NewVal);
182 }
183 }
184}
185
186// Assuming both header and latch are exiting, look for a phi which is only
187// used outside the loop (via a LCSSA phi) in the exit from the header.
188// This means that rotating the loop can remove the phi.
190 BasicBlock *Header = L->getHeader();
191 BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator());
192 assert(BI && BI->isConditional() && "need header with conditional exit");
193 BasicBlock *HeaderExit = BI->getSuccessor(0);
194 if (L->contains(HeaderExit))
195 HeaderExit = BI->getSuccessor(1);
196
197 for (auto &Phi : Header->phis()) {
198 // Look for uses of this phi in the loop/via exits other than the header.
199 if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
200 return cast<Instruction>(U)->getParent() != HeaderExit;
201 }))
202 continue;
203 return true;
204 }
205 return false;
206}
207
208// Check that latch exit is deoptimizing (which means - very unlikely to happen)
209// and there is another exit from the loop which is non-deoptimizing.
210// If we rotate latch to that exit our loop has a better chance of being fully
211// canonical.
212//
213// It can give false positives in some rare cases.
215 BasicBlock *Latch = L->getLoopLatch();
216 assert(Latch && "need latch");
217 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
218 // Need normal exiting latch.
219 if (!BI || !BI->isConditional())
220 return false;
221
222 BasicBlock *Exit = BI->getSuccessor(1);
223 if (L->contains(Exit))
224 Exit = BI->getSuccessor(0);
225
226 // Latch exit is non-deoptimizing, no need to rotate.
228 return false;
229
231 L->getUniqueExitBlocks(Exits);
232 if (!Exits.empty()) {
233 // There is at least one non-deoptimizing exit.
234 //
235 // Note, that BasicBlock::getPostdominatingDeoptimizeCall is not exact,
236 // as it can conservatively return false for deoptimizing exits with
237 // complex enough control flow down to deoptimize call.
238 //
239 // That means here we can report success for a case where
240 // all exits are deoptimizing but one of them has complex enough
241 // control flow (e.g. with loops).
242 //
243 // That should be a very rare case and false positives for this function
244 // have compile-time effect only.
245 return any_of(Exits, [](const BasicBlock *BB) {
247 });
248 }
249 return false;
250}
251
252static void updateBranchWeights(BranchInst &PreHeaderBI, BranchInst &LoopBI,
253 bool HasConditionalPreHeader,
254 bool SuccsSwapped) {
255 MDNode *WeightMD = getBranchWeightMDNode(PreHeaderBI);
256 if (WeightMD == nullptr)
257 return;
258
259 // LoopBI should currently be a clone of PreHeaderBI with the same
260 // metadata. But we double check to make sure we don't have a degenerate case
261 // where instsimplify changed the instructions.
262 if (WeightMD != getBranchWeightMDNode(LoopBI))
263 return;
264
266 extractFromBranchWeightMD(WeightMD, Weights);
267 if (Weights.size() != 2)
268 return;
269 uint32_t OrigLoopExitWeight = Weights[0];
270 uint32_t OrigLoopBackedgeWeight = Weights[1];
271
272 if (SuccsSwapped)
273 std::swap(OrigLoopExitWeight, OrigLoopBackedgeWeight);
274
275 // Update branch weights. Consider the following edge-counts:
276 //
277 // | |-------- |
278 // V V | V
279 // Br i1 ... | Br i1 ...
280 // | | | | |
281 // x| y| | becomes: | y0| |-----
282 // V V | | V V |
283 // Exit Loop | | Loop |
284 // | | | Br i1 ... |
285 // ----- | | | |
286 // x0| x1| y1 | |
287 // V V ----
288 // Exit
289 //
290 // The following must hold:
291 // - x == x0 + x1 # counts to "exit" must stay the same.
292 // - y0 == x - x0 == x1 # how often loop was entered at all.
293 // - y1 == y - y0 # How often loop was repeated (after first iter.).
294 //
295 // We cannot generally deduce how often we had a zero-trip count loop so we
296 // have to make a guess for how to distribute x among the new x0 and x1.
297
298 uint32_t ExitWeight0 = 0; // aka x0
299 if (HasConditionalPreHeader) {
300 // Here we cannot know how many 0-trip count loops we have, so we guess:
301 if (OrigLoopBackedgeWeight > OrigLoopExitWeight) {
302 // If the loop count is bigger than the exit count then we set
303 // probabilities as if 0-trip count nearly never happens.
304 ExitWeight0 = ZeroTripCountWeights[0];
305 // Scale up counts if necessary so we can match `ZeroTripCountWeights` for
306 // the `ExitWeight0`:`ExitWeight1` (aka `x0`:`x1` ratio`) ratio.
307 while (OrigLoopExitWeight < ZeroTripCountWeights[1] + ExitWeight0) {
308 // ... but don't overflow.
309 uint32_t const HighBit = uint32_t{1} << (sizeof(uint32_t) * 8 - 1);
310 if ((OrigLoopBackedgeWeight & HighBit) != 0 ||
311 (OrigLoopExitWeight & HighBit) != 0)
312 break;
313 OrigLoopBackedgeWeight <<= 1;
314 OrigLoopExitWeight <<= 1;
315 }
316 } else {
317 // If there's a higher exit-count than backedge-count then we set
318 // probabilities as if there are only 0-trip and 1-trip cases.
319 ExitWeight0 = OrigLoopExitWeight - OrigLoopBackedgeWeight;
320 }
321 }
322 uint32_t ExitWeight1 = OrigLoopExitWeight - ExitWeight0; // aka x1
323 uint32_t EnterWeight = ExitWeight1; // aka y0
324 uint32_t LoopBackWeight = OrigLoopBackedgeWeight - EnterWeight; // aka y1
325
326 MDBuilder MDB(LoopBI.getContext());
327 MDNode *LoopWeightMD =
328 MDB.createBranchWeights(SuccsSwapped ? LoopBackWeight : ExitWeight1,
329 SuccsSwapped ? ExitWeight1 : LoopBackWeight);
330 LoopBI.setMetadata(LLVMContext::MD_prof, LoopWeightMD);
331 if (HasConditionalPreHeader) {
332 MDNode *PreHeaderWeightMD =
333 MDB.createBranchWeights(SuccsSwapped ? EnterWeight : ExitWeight0,
334 SuccsSwapped ? ExitWeight0 : EnterWeight);
335 PreHeaderBI.setMetadata(LLVMContext::MD_prof, PreHeaderWeightMD);
336 }
337}
338
339/// Rotate loop LP. Return true if the loop is rotated.
340///
341/// \param SimplifiedLatch is true if the latch was just folded into the final
342/// loop exit. In this case we may want to rotate even though the new latch is
343/// now an exiting branch. This rotation would have happened had the latch not
344/// been simplified. However, if SimplifiedLatch is false, then we avoid
345/// rotating loops in which the latch exits to avoid excessive or endless
346/// rotation. LoopRotate should be repeatable and converge to a canonical
347/// form. This property is satisfied because simplifying the loop latch can only
348/// happen once across multiple invocations of the LoopRotate pass.
349///
350/// If -loop-rotate-multi is enabled we can do multiple rotations in one go
351/// so to reach a suitable (non-deoptimizing) exit.
352bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
353 // If the loop has only one block then there is not much to rotate.
354 if (L->getBlocks().size() == 1)
355 return false;
356
357 bool Rotated = false;
358 do {
359 BasicBlock *OrigHeader = L->getHeader();
360 BasicBlock *OrigLatch = L->getLoopLatch();
361
362 BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
363 if (!BI || BI->isUnconditional())
364 return Rotated;
365
366 // If the loop header is not one of the loop exiting blocks then
367 // either this loop is already rotated or it is not
368 // suitable for loop rotation transformations.
369 if (!L->isLoopExiting(OrigHeader))
370 return Rotated;
371
372 // If the loop latch already contains a branch that leaves the loop then the
373 // loop is already rotated.
374 if (!OrigLatch)
375 return Rotated;
376
377 // Rotate if either the loop latch does *not* exit the loop, or if the loop
378 // latch was just simplified. Or if we think it will be profitable.
379 if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false &&
382 return Rotated;
383
384 // Check size of original header and reject loop if it is very big or we can't
385 // duplicate blocks inside it.
386 {
388 CodeMetrics::collectEphemeralValues(L, AC, EphValues);
389
391 Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues, PrepareForLTO);
392 if (Metrics.notDuplicatable) {
394 dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
395 << " instructions: ";
396 L->dump());
397 return Rotated;
398 }
399 if (Metrics.convergent) {
400 LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
401 "instructions: ";
402 L->dump());
403 return Rotated;
404 }
405 if (!Metrics.NumInsts.isValid()) {
406 LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains instructions"
407 " with invalid cost: ";
408 L->dump());
409 return Rotated;
410 }
411 if (Metrics.NumInsts > MaxHeaderSize) {
412 LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains "
413 << Metrics.NumInsts
414 << " instructions, which is more than the threshold ("
415 << MaxHeaderSize << " instructions): ";
416 L->dump());
417 ++NumNotRotatedDueToHeaderSize;
418 return Rotated;
419 }
420
421 // When preparing for LTO, avoid rotating loops with calls that could be
422 // inlined during the LTO stage.
423 if (PrepareForLTO && Metrics.NumInlineCandidates > 0)
424 return Rotated;
425 }
426
427 // Now, this loop is suitable for rotation.
428 BasicBlock *OrigPreheader = L->getLoopPreheader();
429
430 // If the loop could not be converted to canonical form, it must have an
431 // indirectbr in it, just give up.
432 if (!OrigPreheader || !L->hasDedicatedExits())
433 return Rotated;
434
435 // Anything ScalarEvolution may know about this loop or the PHI nodes
436 // in its header will soon be invalidated. We should also invalidate
437 // all outer loops because insertion and deletion of blocks that happens
438 // during the rotation may violate invariants related to backedge taken
439 // infos in them.
440 if (SE) {
441 SE->forgetTopmostLoop(L);
442 // We may hoist some instructions out of loop. In case if they were cached
443 // as "loop variant" or "loop computable", these caches must be dropped.
444 // We also may fold basic blocks, so cached block dispositions also need
445 // to be dropped.
446 SE->forgetBlockAndLoopDispositions();
447 }
448
449 LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
450 if (MSSAU && VerifyMemorySSA)
451 MSSAU->getMemorySSA()->verifyMemorySSA();
452
453 // Find new Loop header. NewHeader is a Header's one and only successor
454 // that is inside loop. Header's other successor is outside the
455 // loop. Otherwise loop is not suitable for rotation.
456 BasicBlock *Exit = BI->getSuccessor(0);
457 BasicBlock *NewHeader = BI->getSuccessor(1);
458 bool BISuccsSwapped = L->contains(Exit);
459 if (BISuccsSwapped)
460 std::swap(Exit, NewHeader);
461 assert(NewHeader && "Unable to determine new loop header");
462 assert(L->contains(NewHeader) && !L->contains(Exit) &&
463 "Unable to determine loop header and exit blocks");
464
465 // This code assumes that the new header has exactly one predecessor.
466 // Remove any single-entry PHI nodes in it.
467 assert(NewHeader->getSinglePredecessor() &&
468 "New header doesn't have one pred!");
469 FoldSingleEntryPHINodes(NewHeader);
470
471 // Begin by walking OrigHeader and populating ValueMap with an entry for
472 // each Instruction.
473 BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
474 ValueToValueMapTy ValueMap, ValueMapMSSA;
475
476 // For PHI nodes, the value available in OldPreHeader is just the
477 // incoming value from OldPreHeader.
478 for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
480 PN->getIncomingValueForBlock(OrigPreheader));
481
482 // For the rest of the instructions, either hoist to the OrigPreheader if
483 // possible or create a clone in the OldPreHeader if not.
484 Instruction *LoopEntryBranch = OrigPreheader->getTerminator();
485
486 // Record all debug intrinsics preceding LoopEntryBranch to avoid
487 // duplication.
488 using DbgIntrinsicHash =
489 std::pair<std::pair<hash_code, DILocalVariable *>, DIExpression *>;
490 auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
491 auto VarLocOps = D->location_ops();
492 return {{hash_combine_range(VarLocOps.begin(), VarLocOps.end()),
493 D->getVariable()},
494 D->getExpression()};
495 };
497 for (Instruction &I : llvm::drop_begin(llvm::reverse(*OrigPreheader))) {
498 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
499 DbgIntrinsics.insert(makeHash(DII));
500 else
501 break;
502 }
503
504 // Remember the local noalias scope declarations in the header. After the
505 // rotation, they must be duplicated and the scope must be cloned. This
506 // avoids unwanted interaction across iterations.
507 SmallVector<NoAliasScopeDeclInst *, 6> NoAliasDeclInstructions;
508 for (Instruction &I : *OrigHeader)
509 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
510 NoAliasDeclInstructions.push_back(Decl);
511
512 while (I != E) {
513 Instruction *Inst = &*I++;
514
515 // If the instruction's operands are invariant and it doesn't read or write
516 // memory, then it is safe to hoist. Doing this doesn't change the order of
517 // execution in the preheader, but does prevent the instruction from
518 // executing in each iteration of the loop. This means it is safe to hoist
519 // something that might trap, but isn't safe to hoist something that reads
520 // memory (without proving that the loop doesn't write).
521 if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
522 !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
523 !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
524 Inst->moveBefore(LoopEntryBranch);
525 ++NumInstrsHoisted;
526 continue;
527 }
528
529 // Otherwise, create a duplicate of the instruction.
530 Instruction *C = Inst->clone();
531 C->insertBefore(LoopEntryBranch);
532
533 ++NumInstrsDuplicated;
534
535 // Eagerly remap the operands of the instruction.
538
539 // Avoid inserting the same intrinsic twice.
540 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
541 if (DbgIntrinsics.count(makeHash(DII))) {
542 C->eraseFromParent();
543 continue;
544 }
545
546 // With the operands remapped, see if the instruction constant folds or is
547 // otherwise simplifyable. This commonly occurs because the entry from PHI
548 // nodes allows icmps and other instructions to fold.
550 if (V && LI->replacementPreservesLCSSAForm(C, V)) {
551 // If so, then delete the temporary instruction and stick the folded value
552 // in the map.
554 if (!C->mayHaveSideEffects()) {
555 C->eraseFromParent();
556 C = nullptr;
557 }
558 } else {
560 }
561 if (C) {
562 // Otherwise, stick the new instruction into the new block!
563 C->setName(Inst->getName());
564
565 if (auto *II = dyn_cast<AssumeInst>(C))
566 AC->registerAssumption(II);
567 // MemorySSA cares whether the cloned instruction was inserted or not, and
568 // not whether it can be remapped to a simplified value.
569 if (MSSAU)
570 InsertNewValueIntoMap(ValueMapMSSA, Inst, C);
571 }
572 }
573
574 if (!NoAliasDeclInstructions.empty()) {
575 // There are noalias scope declarations:
576 // (general):
577 // Original: OrigPre { OrigHeader NewHeader ... Latch }
578 // after: (OrigPre+OrigHeader') { NewHeader ... Latch OrigHeader }
579 //
580 // with D: llvm.experimental.noalias.scope.decl,
581 // U: !noalias or !alias.scope depending on D
582 // ... { D U1 U2 } can transform into:
583 // (0) : ... { D U1 U2 } // no relevant rotation for this part
584 // (1) : ... D' { U1 U2 D } // D is part of OrigHeader
585 // (2) : ... D' U1' { U2 D U1 } // D, U1 are part of OrigHeader
586 //
587 // We now want to transform:
588 // (1) -> : ... D' { D U1 U2 D'' }
589 // (2) -> : ... D' U1' { D U2 D'' U1'' }
590 // D: original llvm.experimental.noalias.scope.decl
591 // D', U1': duplicate with replaced scopes
592 // D'', U1'': different duplicate with replaced scopes
593 // This ensures a safe fallback to 'may_alias' introduced by the rotate,
594 // as U1'' and U1' scopes will not be compatible wrt to the local restrict
595
596 // Clone the llvm.experimental.noalias.decl again for the NewHeader.
597 Instruction *NewHeaderInsertionPoint = &(*NewHeader->getFirstNonPHI());
598 for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions) {
599 LLVM_DEBUG(dbgs() << " Cloning llvm.experimental.noalias.scope.decl:"
600 << *NAD << "\n");
601 Instruction *NewNAD = NAD->clone();
602 NewNAD->insertBefore(NewHeaderInsertionPoint);
603 }
604
605 // Scopes must now be duplicated, once for OrigHeader and once for
606 // OrigPreHeader'.
607 {
608 auto &Context = NewHeader->getContext();
609
610 SmallVector<MDNode *, 8> NoAliasDeclScopes;
611 for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions)
612 NoAliasDeclScopes.push_back(NAD->getScopeList());
613
614 LLVM_DEBUG(dbgs() << " Updating OrigHeader scopes\n");
615 cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, {OrigHeader}, Context,
616 "h.rot");
617 LLVM_DEBUG(OrigHeader->dump());
618
619 // Keep the compile time impact low by only adapting the inserted block
620 // of instructions in the OrigPreHeader. This might result in slightly
621 // more aliasing between these instructions and those that were already
622 // present, but it will be much faster when the original PreHeader is
623 // large.
624 LLVM_DEBUG(dbgs() << " Updating part of OrigPreheader scopes\n");
625 auto *FirstDecl =
626 cast<Instruction>(ValueMap[*NoAliasDeclInstructions.begin()]);
627 auto *LastInst = &OrigPreheader->back();
628 cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, FirstDecl, LastInst,
629 Context, "pre.rot");
630 LLVM_DEBUG(OrigPreheader->dump());
631
632 LLVM_DEBUG(dbgs() << " Updated NewHeader:\n");
633 LLVM_DEBUG(NewHeader->dump());
634 }
635 }
636
637 // Along with all the other instructions, we just cloned OrigHeader's
638 // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
639 // successors by duplicating their incoming values for OrigHeader.
640 for (BasicBlock *SuccBB : successors(OrigHeader))
641 for (BasicBlock::iterator BI = SuccBB->begin();
642 PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
643 PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);
644
645 // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
646 // OrigPreHeader's old terminator (the original branch into the loop), and
647 // remove the corresponding incoming values from the PHI nodes in OrigHeader.
648 LoopEntryBranch->eraseFromParent();
649
650 // Update MemorySSA before the rewrite call below changes the 1:1
651 // instruction:cloned_instruction_or_value mapping.
652 if (MSSAU) {
653 InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader);
654 MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
655 ValueMapMSSA);
656 }
657
658 SmallVector<PHINode*, 2> InsertedPHIs;
659 // If there were any uses of instructions in the duplicated block outside the
660 // loop, update them, inserting PHI nodes as required
661 RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap, SE,
662 &InsertedPHIs);
663
664 // Attach dbg.value intrinsics to the new phis if that phi uses a value that
665 // previously had debug metadata attached. This keeps the debug info
666 // up-to-date in the loop body.
667 if (!InsertedPHIs.empty())
668 insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);
669
670 // NewHeader is now the header of the loop.
671 L->moveToHeader(NewHeader);
672 assert(L->getHeader() == NewHeader && "Latch block is our new header");
673
674 // Inform DT about changes to the CFG.
675 if (DT) {
676 // The OrigPreheader branches to the NewHeader and Exit now. Then, inform
677 // the DT about the removed edge to the OrigHeader (that got removed).
679 Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
680 Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
681 Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});
682
683 if (MSSAU) {
684 MSSAU->applyUpdates(Updates, *DT, /*UpdateDT=*/true);
685 if (VerifyMemorySSA)
686 MSSAU->getMemorySSA()->verifyMemorySSA();
687 } else {
688 DT->applyUpdates(Updates);
689 }
690 }
691
692 // At this point, we've finished our major CFG changes. As part of cloning
693 // the loop into the preheader we've simplified instructions and the
694 // duplicated conditional branch may now be branching on a constant. If it is
695 // branching on a constant and if that constant means that we enter the loop,
696 // then we fold away the cond branch to an uncond branch. This simplifies the
697 // loop in cases important for nested loops, and it also means we don't have
698 // to split as many edges.
699 BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
700 assert(PHBI->isConditional() && "Should be clone of BI condbr!");
701 const Value *Cond = PHBI->getCondition();
702 const bool HasConditionalPreHeader =
703 !isa<ConstantInt>(Cond) ||
704 PHBI->getSuccessor(cast<ConstantInt>(Cond)->isZero()) != NewHeader;
705
706 updateBranchWeights(*PHBI, *BI, HasConditionalPreHeader, BISuccsSwapped);
707
708 if (HasConditionalPreHeader) {
709 // The conditional branch can't be folded, handle the general case.
710 // Split edges as necessary to preserve LoopSimplify form.
711
712 // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
713 // thus is not a preheader anymore.
714 // Split the edge to form a real preheader.
716 OrigPreheader, NewHeader,
717 CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
718 NewPH->setName(NewHeader->getName() + ".lr.ph");
719
720 // Preserve canonical loop form, which means that 'Exit' should have only
721 // one predecessor. Note that Exit could be an exit block for multiple
722 // nested loops, causing both of the edges to now be critical and need to
723 // be split.
725 bool SplitLatchEdge = false;
726 for (BasicBlock *ExitPred : ExitPreds) {
727 // We only need to split loop exit edges.
728 Loop *PredLoop = LI->getLoopFor(ExitPred);
729 if (!PredLoop || PredLoop->contains(Exit) ||
730 isa<IndirectBrInst>(ExitPred->getTerminator()))
731 continue;
732 SplitLatchEdge |= L->getLoopLatch() == ExitPred;
733 BasicBlock *ExitSplit = SplitCriticalEdge(
734 ExitPred, Exit,
735 CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
736 ExitSplit->moveBefore(Exit);
737 }
738 assert(SplitLatchEdge &&
739 "Despite splitting all preds, failed to split latch exit?");
740 (void)SplitLatchEdge;
741 } else {
742 // We can fold the conditional branch in the preheader, this makes things
743 // simpler. The first step is to remove the extra edge to the Exit block.
744 Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
745 BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
746 NewBI->setDebugLoc(PHBI->getDebugLoc());
747 PHBI->eraseFromParent();
748
749 // With our CFG finalized, update DomTree if it is available.
750 if (DT) DT->deleteEdge(OrigPreheader, Exit);
751
752 // Update MSSA too, if available.
753 if (MSSAU)
754 MSSAU->removeEdge(OrigPreheader, Exit);
755 }
756
757 assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
758 assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");
759
760 if (MSSAU && VerifyMemorySSA)
761 MSSAU->getMemorySSA()->verifyMemorySSA();
762
763 // Now that the CFG and DomTree are in a consistent state again, try to merge
764 // the OrigHeader block into OrigLatch. This will succeed if they are
765 // connected by an unconditional branch. This is just a cleanup so the
766 // emitted code isn't too gross in this common case.
767 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
768 BasicBlock *PredBB = OrigHeader->getUniquePredecessor();
769 bool DidMerge = MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);
770 if (DidMerge)
772
773 if (MSSAU && VerifyMemorySSA)
774 MSSAU->getMemorySSA()->verifyMemorySSA();
775
776 LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());
777
778 ++NumRotated;
779
780 Rotated = true;
781 SimplifiedLatch = false;
782
783 // Check that new latch is a deoptimizing exit and then repeat rotation if possible.
784 // Deoptimizing latch exit is not a generally typical case, so we just loop over.
785 // TODO: if it becomes a performance bottleneck extend rotation algorithm
786 // to handle multiple rotations in one go.
788
789
790 return true;
791}
792
793/// Determine whether the instructions in this range may be safely and cheaply
794/// speculated. This is not an important enough situation to develop complex
795/// heuristics. We handle a single arithmetic instruction along with any type
796/// conversions.
799 bool seenIncrement = false;
800 bool MultiExitLoop = false;
801
802 if (!L->getExitingBlock())
803 MultiExitLoop = true;
804
805 for (BasicBlock::iterator I = Begin; I != End; ++I) {
806
808 return false;
809
810 if (isa<DbgInfoIntrinsic>(I))
811 continue;
812
813 switch (I->getOpcode()) {
814 default:
815 return false;
816 case Instruction::GetElementPtr:
817 // GEPs are cheap if all indices are constant.
818 if (!cast<GEPOperator>(I)->hasAllConstantIndices())
819 return false;
820 // fall-thru to increment case
821 [[fallthrough]];
822 case Instruction::Add:
823 case Instruction::Sub:
824 case Instruction::And:
825 case Instruction::Or:
826 case Instruction::Xor:
827 case Instruction::Shl:
828 case Instruction::LShr:
829 case Instruction::AShr: {
830 Value *IVOpnd =
831 !isa<Constant>(I->getOperand(0))
832 ? I->getOperand(0)
833 : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
834 if (!IVOpnd)
835 return false;
836
837 // If increment operand is used outside of the loop, this speculation
838 // could cause extra live range interference.
839 if (MultiExitLoop) {
840 for (User *UseI : IVOpnd->users()) {
841 auto *UserInst = cast<Instruction>(UseI);
842 if (!L->contains(UserInst))
843 return false;
844 }
845 }
846
847 if (seenIncrement)
848 return false;
849 seenIncrement = true;
850 break;
851 }
852 case Instruction::Trunc:
853 case Instruction::ZExt:
854 case Instruction::SExt:
855 // ignore type conversions
856 break;
857 }
858 }
859 return true;
860}
861
862/// Fold the loop tail into the loop exit by speculating the loop tail
863/// instructions. Typically, this is a single post-increment. In the case of a
864/// simple 2-block loop, hoisting the increment can be much better than
865/// duplicating the entire loop header. In the case of loops with early exits,
866/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
867/// canonical form so downstream passes can handle it.
868///
869/// I don't believe this invalidates SCEV.
870bool LoopRotate::simplifyLoopLatch(Loop *L) {
871 BasicBlock *Latch = L->getLoopLatch();
872 if (!Latch || Latch->hasAddressTaken())
873 return false;
874
875 BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
876 if (!Jmp || !Jmp->isUnconditional())
877 return false;
878
879 BasicBlock *LastExit = Latch->getSinglePredecessor();
880 if (!LastExit || !L->isLoopExiting(LastExit))
881 return false;
882
883 BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
884 if (!BI)
885 return false;
886
887 if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
888 return false;
889
890 LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
891 << LastExit->getName() << "\n");
892
893 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
894 MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr,
895 /*PredecessorWithTwoSuccessors=*/true);
896
897 if (SE) {
898 // Merging blocks may remove blocks reference in the block disposition cache. Clear the cache.
899 SE->forgetBlockAndLoopDispositions();
900 }
901
902 if (MSSAU && VerifyMemorySSA)
903 MSSAU->getMemorySSA()->verifyMemorySSA();
904
905 return true;
906}
907
908/// Rotate \c L, and return true if any modification was made.
909bool LoopRotate::processLoop(Loop *L) {
910 // Save the loop metadata.
911 MDNode *LoopMD = L->getLoopID();
912
913 bool SimplifiedLatch = false;
914
915 // Simplify the loop latch before attempting to rotate the header
916 // upward. Rotation may not be needed if the loop tail can be folded into the
917 // loop exit.
918 if (!RotationOnly)
919 SimplifiedLatch = simplifyLoopLatch(L);
920
921 bool MadeChange = rotateLoop(L, SimplifiedLatch);
922 assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
923 "Loop latch should be exiting after loop-rotate.");
924
925 // Restore the loop metadata.
926 // NB! We presume LoopRotation DOESN'T ADD its own metadata.
927 if ((MadeChange || SimplifiedLatch) && LoopMD)
928 L->setLoopID(LoopMD);
929
930 return MadeChange || SimplifiedLatch;
931}
932
933
934/// The utility to convert a loop into a loop with bottom test.
938 const SimplifyQuery &SQ, bool RotationOnly = true,
939 unsigned Threshold = unsigned(-1),
940 bool IsUtilMode = true, bool PrepareForLTO) {
941 LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
942 IsUtilMode, PrepareForLTO);
943 return LR.processLoop(L);
944}
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
bool End
Definition: ELF_riscv.cpp:469
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition: Lint.cpp:526
static constexpr uint32_t ZeroTripCountWeights[]
static bool canRotateDeoptimizingLatchExit(Loop *L)
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin, BasicBlock::iterator End, Loop *L)
Determine whether the instructions in this range may be safely and cheaply speculated.
static cl::opt< bool > MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden, cl::desc("Allow loop rotation multiple times in order to reach " "a better latch exit"))
static bool profitableToRotateLoopExitingLatch(Loop *L)
static void updateBranchWeights(BranchInst &PreHeaderBI, BranchInst &LoopBI, bool HasConditionalPreHeader, bool SuccsSwapped)
static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V)
Insert (K, V) pair into the ValueToValueMap, and verify the key did not previously exist in the map,...
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader, BasicBlock *OrigPreheader, ValueToValueMapTy &ValueMap, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *InsertedPHIs)
RewriteUsesOfClonedInstructions - We just cloned the instructions from the old header into the prehea...
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Trace Metrics
Memory SSA
Definition: MemorySSA.cpp:71
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
LLVMContext & Context
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Class recording the (high level) value of a variable.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator end()
Definition: BasicBlock.h:337
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:335
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:516
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:216
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:296
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:304
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:35
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition: BasicBlock.h:263
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
const CallInst * getPostdominatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize that is present either in current ...
Definition: BasicBlock.cpp:195
const Instruction & back() const
Definition: BasicBlock.h:349
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:353
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
DWARF expression.
This is the common base class for debug info intrinsics for variables.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:89
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:392
const BasicBlock * getParent() const
Definition: Instruction.h:90
bool isTerminator() const
Definition: Instruction.h:198
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1521
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:83
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:389
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:47
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:950
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:39
The main scalar evolution driver.
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:290
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1724
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:164
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: ValueMap.h:172
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:378
iterator_range< user_iterator > users()
Definition: Value.h:421
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1069
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void dump() const
Support for debugging, callable in GDB: V->dump()
Definition: AsmWriter.cpp:4937
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
self_iterator getIterator()
Definition: ilist_node.h:82
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:330
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
auto successors(const MachineBasicBlock *BB)
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:666
void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition: Local.cpp:1708
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1734
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:429
@ RF_IgnoreMissingLocals
If this flag is set, the remapper ignores missing function-local entries (Argument,...
Definition: ValueMapper.h:89
@ RF_NoModuleLevelChanges
If this flag is set, the remapper knows that only local values within a function (such as an instruct...
Definition: ValueMapper.h:71
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:99
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM, RemapFlags Flags=RF_None, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Convert the instruction operands from referencing the current values into those specified by VM.
Definition: ValueMapper.h:256
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:83
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
void cloneAndAdaptNoAliasScopes(ArrayRef< MDNode * > NoAliasDeclScopes, ArrayRef< BasicBlock * > NewBlocks, LLVMContext &Context, StringRef Ext)
Clone the specified noalias decl scopes.
bool FoldSingleEntryPHINodes(BasicBlock *BB, MemoryDependenceResults *MemDep=nullptr)
We know that BB has one predecessor.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
auto predecessors(const MachineBasicBlock *BB)
void extractFromBranchWeightMD(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Faster version of extractBranchWeights() that skips checks and must only be called with "branch_weigh...
bool LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI, AssumptionCache *AC, DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU, const SimplifyQuery &SQ, bool RotationOnly, unsigned Threshold, bool IsUtilMode, bool PrepareForLTO=false)
Convert a loop into a loop with bottom test.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:491
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
Utility to calculate the size and a few similar metrics for a set of basic blocks.
Definition: CodeMetrics.h:31
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
Definition: CodeMetrics.cpp:70
Option class for critical edge splitting.