LLVM 23.0.0git
LoopUnrollAndJam.cpp
Go to the documentation of this file.
1//===-- LoopUnrollAndJam.cpp - Loop unrolling utilities -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements loop unroll and jam as a routine, much like
10// LoopUnroll.cpp implements loop unroll.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/Twine.h"
30#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/DebugLoc.h"
34#include "llvm/IR/Dominators.h"
35#include "llvm/IR/Function.h"
36#include "llvm/IR/Instruction.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/IR/ValueHandle.h"
42#include "llvm/IR/ValueMap.h"
44#include "llvm/Support/Debug.h"
54#include <assert.h>
55#include <memory>
56#include <vector>
57
58using namespace llvm;
59
60#define DEBUG_TYPE "loop-unroll-and-jam"
61
62STATISTIC(NumUnrolledAndJammed, "Number of loops unroll and jammed");
63STATISTIC(NumCompletelyUnrolledAndJammed, "Number of loops unroll and jammed");
64
66
67// Partition blocks in an outer/inner loop pair into blocks before and after
68// the loop
69static bool partitionLoopBlocks(Loop &L, BasicBlockSet &ForeBlocks,
70 BasicBlockSet &AftBlocks, DominatorTree &DT) {
71 Loop *SubLoop = L.getSubLoops()[0];
72 BasicBlock *SubLoopLatch = SubLoop->getLoopLatch();
73
74 for (BasicBlock *BB : L.blocks()) {
75 if (!SubLoop->contains(BB)) {
76 if (DT.dominates(SubLoopLatch, BB))
77 AftBlocks.insert(BB);
78 else
79 ForeBlocks.insert(BB);
80 }
81 }
82
83 // Check that all blocks in ForeBlocks together dominate the subloop
84 // TODO: This might ideally be done better with a dominator/postdominators.
85 BasicBlock *SubLoopPreHeader = SubLoop->getLoopPreheader();
86 for (BasicBlock *BB : ForeBlocks) {
87 if (BB == SubLoopPreHeader)
88 continue;
89 Instruction *TI = BB->getTerminator();
90 for (BasicBlock *Succ : successors(TI))
91 if (!ForeBlocks.count(Succ))
92 return false;
93 }
94
95 return true;
96}
97
98/// Partition blocks in a loop nest into blocks before and after each inner
99/// loop.
101 Loop &Root, Loop &JamLoop, BasicBlockSet &JamLoopBlocks,
102 DenseMap<Loop *, BasicBlockSet> &ForeBlocksMap,
104 JamLoopBlocks.insert_range(JamLoop.blocks());
105
106 for (Loop *L : Root.getLoopsInPreorder()) {
107 if (L == &JamLoop)
108 break;
109
110 if (!partitionLoopBlocks(*L, ForeBlocksMap[L], AftBlocksMap[L], DT))
111 return false;
112 }
113
114 return true;
115}
116
117// TODO Remove when UnrollAndJamLoop changed to support unroll and jamming more
118// than 2 levels loop.
119static bool partitionOuterLoopBlocks(Loop *L, Loop *SubLoop,
120 BasicBlockSet &ForeBlocks,
121 BasicBlockSet &SubLoopBlocks,
122 BasicBlockSet &AftBlocks,
123 DominatorTree *DT) {
124 SubLoopBlocks.insert_range(SubLoop->blocks());
125 return partitionLoopBlocks(*L, ForeBlocks, AftBlocks, *DT);
126}
127
128// Looks at the phi nodes in Header for values coming from Latch. For these
129// instructions and all their operands calls Visit on them, keeping going for
130// all the operands in AftBlocks. Returns false if Visit returns false,
131// otherwise returns true. This is used to process the instructions in the
132// Aft blocks that need to be moved before the subloop. It is used in two
133// places. One to check that the required set of instructions can be moved
134// before the loop. Then to collect the instructions to actually move in
135// moveHeaderPhiOperandsToForeBlocks.
136template <typename T>
138 BasicBlockSet &AftBlocks, T Visit) {
140
141 std::function<bool(Instruction * I)> ProcessInstr = [&](Instruction *I) {
142 if (!VisitedInstr.insert(I).second)
143 return true;
144
145 if (AftBlocks.count(I->getParent()))
146 for (auto &U : I->operands())
148 if (!ProcessInstr(II))
149 return false;
150
151 return Visit(I);
152 };
153
154 for (auto &Phi : Header->phis()) {
155 Value *V = Phi.getIncomingValueForBlock(Latch);
157 if (!ProcessInstr(I))
158 return false;
159 }
160
161 return true;
162}
163
164// Move the phi operands of Header from Latch out of AftBlocks to InsertLoc.
166 BasicBlock *Latch,
167 BasicBlock::iterator InsertLoc,
168 BasicBlockSet &AftBlocks) {
169 // We need to ensure we move the instructions in the correct order,
170 // starting with the earliest required instruction and moving forward.
171 processHeaderPhiOperands(Header, Latch, AftBlocks,
172 [&AftBlocks, &InsertLoc](Instruction *I) {
173 if (AftBlocks.count(I->getParent()))
174 I->moveBefore(InsertLoc);
175 return true;
176 });
177}
178
179/*
180 This method performs Unroll and Jam. For a simple loop like:
181 for (i = ..)
182 Fore(i)
183 for (j = ..)
184 SubLoop(i, j)
185 Aft(i)
186
187 Instead of doing normal inner or outer unrolling, we do:
188 for (i = .., i+=2)
189 Fore(i)
190 Fore(i+1)
191 for (j = ..)
192 SubLoop(i, j)
193 SubLoop(i+1, j)
194 Aft(i)
195 Aft(i+1)
196
197 So the outer loop is essetially unrolled and then the inner loops are fused
198 ("jammed") together into a single loop. This can increase speed when there
199 are loads in SubLoop that are invariant to i, as they become shared between
200 the now jammed inner loops.
201
202 We do this by spliting the blocks in the loop into Fore, Subloop and Aft.
203 Fore blocks are those before the inner loop, Aft are those after. Normal
204 Unroll code is used to copy each of these sets of blocks and the results are
205 combined together into the final form above.
206
207 isSafeToUnrollAndJam should be used prior to calling this to make sure the
208 unrolling will be valid. Checking profitablility is also advisable.
209
210 If EpilogueLoop is non-null, it receives the epilogue loop (if it was
211 necessary to create one and not fully unrolled).
212*/
214llvm::UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
215 unsigned TripMultiple, bool UnrollRemainder,
218 OptimizationRemarkEmitter *ORE, Loop **EpilogueLoop) {
219
220 // When we enter here we should have already checked that it is safe
221 BasicBlock *Header = L->getHeader();
222 assert(Header && "No header.");
223 assert(L->getSubLoops().size() == 1);
224 Loop *SubLoop = *L->begin();
225
226 // Don't enter the unroll code if there is nothing to do.
227 if (TripCount == 0 && Count < 2) {
228 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; almost nothing to do\n");
230 }
231
232 assert(Count > 0);
233 assert(TripMultiple > 0);
234 assert(TripCount == 0 || TripCount % TripMultiple == 0);
235
236 // Are we eliminating the loop control altogether?
237 bool CompletelyUnroll = (Count == TripCount);
238
239 // We use the runtime remainder in cases where we don't know trip multiple
240 if (TripMultiple % Count != 0) {
241 if (!UnrollRuntimeLoopRemainder(L, Count, /*AllowExpensiveTripCount*/ false,
242 /*UseEpilogRemainder*/ true,
243 UnrollRemainder, /*ForgetAllSCEV*/ false,
244 LI, SE, DT, AC, TTI, true,
245 SCEVCheapExpansionBudget, EpilogueLoop)) {
246 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; remainder loop could not be "
247 "generated when assuming runtime trip count\n");
249 }
250 }
251
252 // Notify ScalarEvolution that the loop will be substantially changed,
253 // if not outright eliminated.
254 if (SE) {
255 SE->forgetLoop(L);
257 }
258
259 using namespace ore;
260 // Report the unrolling decision.
261 if (CompletelyUnroll) {
262 LLVM_DEBUG(dbgs() << "COMPLETELY UNROLL AND JAMMING loop %"
263 << Header->getName() << " with trip count " << TripCount
264 << "!\n");
265 ORE->emit(OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
266 L->getHeader())
267 << "completely unroll and jammed loop with "
268 << NV("UnrollCount", TripCount) << " iterations");
269 } else {
270 auto DiagBuilder = [&]() {
271 OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
272 L->getHeader());
273 return Diag << "unroll and jammed loop by a factor of "
274 << NV("UnrollCount", Count);
275 };
276
277 LLVM_DEBUG(dbgs() << "UNROLL AND JAMMING loop %" << Header->getName()
278 << " by " << Count);
279 if (TripMultiple != 1) {
280 LLVM_DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
281 ORE->emit([&]() {
282 return DiagBuilder() << " with " << NV("TripMultiple", TripMultiple)
283 << " trips per branch";
284 });
285 } else {
286 LLVM_DEBUG(dbgs() << " with run-time trip count");
287 ORE->emit([&]() { return DiagBuilder() << " with run-time trip count"; });
288 }
289 LLVM_DEBUG(dbgs() << "!\n");
290 }
291
292 BasicBlock *Preheader = L->getLoopPreheader();
293 BasicBlock *LatchBlock = L->getLoopLatch();
294 assert(Preheader && "No preheader");
295 assert(LatchBlock && "No latch block");
296 CondBrInst *BI = cast<CondBrInst>(LatchBlock->getTerminator());
297 bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
298 BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);
299 bool SubLoopContinueOnTrue = SubLoop->contains(
300 SubLoop->getLoopLatch()->getTerminator()->getSuccessor(0));
301
302 // Partition blocks in an outer/inner loop pair into blocks before and after
303 // the loop
304 BasicBlockSet SubLoopBlocks;
305 BasicBlockSet ForeBlocks;
306 BasicBlockSet AftBlocks;
307 partitionOuterLoopBlocks(L, SubLoop, ForeBlocks, SubLoopBlocks, AftBlocks,
308 DT);
309
310 // We keep track of the entering/first and exiting/last block of each of
311 // Fore/SubLoop/Aft in each iteration. This helps make the stapling up of
312 // blocks easier.
313 std::vector<BasicBlock *> ForeBlocksFirst;
314 std::vector<BasicBlock *> ForeBlocksLast;
315 std::vector<BasicBlock *> SubLoopBlocksFirst;
316 std::vector<BasicBlock *> SubLoopBlocksLast;
317 std::vector<BasicBlock *> AftBlocksFirst;
318 std::vector<BasicBlock *> AftBlocksLast;
319 ForeBlocksFirst.push_back(Header);
320 ForeBlocksLast.push_back(SubLoop->getLoopPreheader());
321 SubLoopBlocksFirst.push_back(SubLoop->getHeader());
322 SubLoopBlocksLast.push_back(SubLoop->getExitingBlock());
323 AftBlocksFirst.push_back(SubLoop->getExitBlock());
324 AftBlocksLast.push_back(L->getExitingBlock());
325 // Maps Blocks[0] -> Blocks[It]
326 ValueToValueMapTy LastValueMap;
327
328 // Move any instructions from fore phi operands from AftBlocks into Fore.
330 Header, LatchBlock, ForeBlocksLast[0]->getTerminator()->getIterator(),
331 AftBlocks);
332
333 // The current on-the-fly SSA update requires blocks to be processed in
334 // reverse postorder so that LastValueMap contains the correct value at each
335 // exit.
336 LoopBlocksDFS DFS(L);
337 DFS.perform(LI);
338 // Stash the DFS iterators before adding blocks to the loop.
339 LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
340 LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();
341
342 // When a FSDiscriminator is enabled, we don't need to add the multiply
343 // factors to the discriminators.
344 if (Header->getParent()->shouldEmitDebugInfoForProfiling() &&
346 for (BasicBlock *BB : L->getBlocks())
347 for (Instruction &I : *BB)
348 if (!I.isDebugOrPseudoInst())
349 if (const DILocation *DIL = I.getDebugLoc()) {
350 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(Count);
351 if (NewDIL)
352 I.setDebugLoc(*NewDIL);
353 else
355 << "Failed to create new discriminator: "
356 << DIL->getFilename() << " Line: " << DIL->getLine());
357 }
358
359 // Copy all blocks
360 for (unsigned It = 1; It != Count; ++It) {
362 // Maps Blocks[It] -> Blocks[It-1]
363 DenseMap<Value *, Value *> PrevItValueMap;
365 NewLoops[L] = L;
366 NewLoops[SubLoop] = SubLoop;
367
368 for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
370 BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
371 Header->getParent()->insert(Header->getParent()->end(), New);
372
373 // Tell LI about New.
374 addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);
375
376 if (ForeBlocks.count(*BB)) {
377 if (*BB == ForeBlocksFirst[0])
378 ForeBlocksFirst.push_back(New);
379 if (*BB == ForeBlocksLast[0])
380 ForeBlocksLast.push_back(New);
381 } else if (SubLoopBlocks.count(*BB)) {
382 if (*BB == SubLoopBlocksFirst[0])
383 SubLoopBlocksFirst.push_back(New);
384 if (*BB == SubLoopBlocksLast[0])
385 SubLoopBlocksLast.push_back(New);
386 } else if (AftBlocks.count(*BB)) {
387 if (*BB == AftBlocksFirst[0])
388 AftBlocksFirst.push_back(New);
389 if (*BB == AftBlocksLast[0])
390 AftBlocksLast.push_back(New);
391 } else {
392 llvm_unreachable("BB being cloned should be in Fore/Sub/Aft");
393 }
394
395 // Update our running maps of newest clones
396 auto &Last = LastValueMap[*BB];
397 PrevItValueMap[New] = (It == 1 ? *BB : Last);
398 Last = New;
399 for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
400 VI != VE; ++VI) {
401 auto &LVM = LastValueMap[VI->first];
402 PrevItValueMap[VI->second] =
403 const_cast<Value *>(It == 1 ? VI->first : LVM);
404 LVM = VI->second;
405 }
406
407 NewBlocks.push_back(New);
408
409 // Update DomTree:
410 if (*BB == ForeBlocksFirst[0])
411 DT->addNewBlock(New, ForeBlocksLast[It - 1]);
412 else if (*BB == SubLoopBlocksFirst[0])
413 DT->addNewBlock(New, SubLoopBlocksLast[It - 1]);
414 else if (*BB == AftBlocksFirst[0])
415 DT->addNewBlock(New, AftBlocksLast[It - 1]);
416 else {
417 // Each set of blocks (Fore/Sub/Aft) will have the same internal domtree
418 // structure.
419 auto BBDomNode = DT->getNode(*BB);
420 auto BBIDom = BBDomNode->getIDom();
421 BasicBlock *OriginalBBIDom = BBIDom->getBlock();
422 assert(OriginalBBIDom);
423 assert(LastValueMap[cast<Value>(OriginalBBIDom)]);
424 DT->addNewBlock(
425 New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
426 }
427 }
428
429 // Remap all instructions in the most recent iteration
430 remapInstructionsInBlocks(NewBlocks, LastValueMap);
431 for (BasicBlock *NewBlock : NewBlocks) {
432 for (Instruction &I : *NewBlock) {
433 if (auto *II = dyn_cast<AssumeInst>(&I))
435 }
436 }
437
438 // Alter the ForeBlocks phi's, pointing them at the latest version of the
439 // value from the previous iteration's phis
440 for (PHINode &Phi : ForeBlocksFirst[It]->phis()) {
441 Value *OldValue = Phi.getIncomingValueForBlock(AftBlocksLast[It]);
442 assert(OldValue && "should have incoming edge from Aft[It]");
443 Value *NewValue = OldValue;
444 if (Value *PrevValue = PrevItValueMap[OldValue])
445 NewValue = PrevValue;
446
447 assert(Phi.getNumOperands() == 2);
448 Phi.setIncomingBlock(0, ForeBlocksLast[It - 1]);
449 Phi.setIncomingValue(0, NewValue);
450 Phi.removeIncomingValue(1);
451 }
452 }
453
454 // Now that all the basic blocks for the unrolled iterations are in place,
455 // finish up connecting the blocks and phi nodes. At this point LastValueMap
456 // is the last unrolled iterations values.
457
458 // Update Phis in BB from OldBB to point to NewBB and use the latest value
459 // from LastValueMap
460 auto updatePHIBlocksAndValues = [](BasicBlock *BB, BasicBlock *OldBB,
461 BasicBlock *NewBB,
462 ValueToValueMapTy &LastValueMap) {
463 for (PHINode &Phi : BB->phis()) {
464 for (unsigned b = 0; b < Phi.getNumIncomingValues(); ++b) {
465 if (Phi.getIncomingBlock(b) == OldBB) {
466 Value *OldValue = Phi.getIncomingValue(b);
467 if (Value *LastValue = LastValueMap[OldValue])
468 Phi.setIncomingValue(b, LastValue);
469 Phi.setIncomingBlock(b, NewBB);
470 break;
471 }
472 }
473 }
474 };
475 // Move all the phis from Src into Dest
476 auto movePHIs = [](BasicBlock *Src, BasicBlock *Dest) {
477 BasicBlock::iterator insertPoint = Dest->getFirstNonPHIIt();
478 while (PHINode *Phi = dyn_cast<PHINode>(Src->begin()))
479 Phi->moveBefore(*Dest, insertPoint);
480 };
481
482 // Update the PHI values outside the loop to point to the last block
483 updatePHIBlocksAndValues(LoopExit, AftBlocksLast[0], AftBlocksLast.back(),
484 LastValueMap);
485
486 // Update ForeBlocks successors and phi nodes
487 UncondBrInst *ForeTerm =
488 cast<UncondBrInst>(ForeBlocksLast.back()->getTerminator());
489 ForeTerm->setSuccessor(SubLoopBlocksFirst[0]);
490
491 if (CompletelyUnroll) {
492 while (PHINode *Phi = dyn_cast<PHINode>(ForeBlocksFirst[0]->begin())) {
493 Phi->replaceAllUsesWith(Phi->getIncomingValueForBlock(Preheader));
494 Phi->eraseFromParent();
495 }
496 } else {
497 // Update the PHI values to point to the last aft block
498 updatePHIBlocksAndValues(ForeBlocksFirst[0], AftBlocksLast[0],
499 AftBlocksLast.back(), LastValueMap);
500 }
501
502 for (unsigned It = 1; It != Count; It++) {
503 // Remap ForeBlock successors from previous iteration to this
504 UncondBrInst *ForeTerm =
505 cast<UncondBrInst>(ForeBlocksLast[It - 1]->getTerminator());
506 ForeTerm->setSuccessor(ForeBlocksFirst[It]);
507 }
508
509 // Subloop successors and phis
510 CondBrInst *SubTerm =
511 cast<CondBrInst>(SubLoopBlocksLast.back()->getTerminator());
512 SubTerm->setSuccessor(!SubLoopContinueOnTrue, SubLoopBlocksFirst[0]);
513 SubTerm->setSuccessor(SubLoopContinueOnTrue, AftBlocksFirst[0]);
514 SubLoopBlocksFirst[0]->replacePhiUsesWith(ForeBlocksLast[0],
515 ForeBlocksLast.back());
516 SubLoopBlocksFirst[0]->replacePhiUsesWith(SubLoopBlocksLast[0],
517 SubLoopBlocksLast.back());
518
519 for (unsigned It = 1; It != Count; It++) {
520 // Replace the conditional branch of the previous iteration subloop with an
521 // unconditional one to this one
522 CondBrInst *SubTerm =
523 cast<CondBrInst>(SubLoopBlocksLast[It - 1]->getTerminator());
524 UncondBrInst::Create(SubLoopBlocksFirst[It], SubTerm->getIterator());
525 SubTerm->eraseFromParent();
526
527 SubLoopBlocksFirst[It]->replacePhiUsesWith(ForeBlocksLast[It],
528 ForeBlocksLast.back());
529 SubLoopBlocksFirst[It]->replacePhiUsesWith(SubLoopBlocksLast[It],
530 SubLoopBlocksLast.back());
531 movePHIs(SubLoopBlocksFirst[It], SubLoopBlocksFirst[0]);
532 }
533
534 // Aft blocks successors and phis
535 CondBrInst *AftTerm = cast<CondBrInst>(AftBlocksLast.back()->getTerminator());
536 if (CompletelyUnroll) {
537 UncondBrInst::Create(LoopExit, AftTerm->getIterator());
538 AftTerm->eraseFromParent();
539 } else {
540 AftTerm->setSuccessor(!ContinueOnTrue, ForeBlocksFirst[0]);
541 assert(AftTerm->getSuccessor(ContinueOnTrue) == LoopExit &&
542 "Expecting the ContinueOnTrue successor of AftTerm to be LoopExit");
543 }
544 AftBlocksFirst[0]->replacePhiUsesWith(SubLoopBlocksLast[0],
545 SubLoopBlocksLast.back());
546
547 for (unsigned It = 1; It != Count; It++) {
548 // Replace the conditional branch of the previous iteration subloop with an
549 // unconditional one to this one
550 CondBrInst *AftTerm =
551 cast<CondBrInst>(AftBlocksLast[It - 1]->getTerminator());
552 UncondBrInst::Create(AftBlocksFirst[It], AftTerm->getIterator());
553 AftTerm->eraseFromParent();
554
555 AftBlocksFirst[It]->replacePhiUsesWith(SubLoopBlocksLast[It],
556 SubLoopBlocksLast.back());
557 movePHIs(AftBlocksFirst[It], AftBlocksFirst[0]);
558 }
559
560 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
561 // Dominator Tree. Remove the old links between Fore, Sub and Aft, adding the
562 // new ones required.
563 if (Count != 1) {
565 DTUpdates.emplace_back(DominatorTree::UpdateKind::Delete, ForeBlocksLast[0],
566 SubLoopBlocksFirst[0]);
567 DTUpdates.emplace_back(DominatorTree::UpdateKind::Delete,
568 SubLoopBlocksLast[0], AftBlocksFirst[0]);
569
570 DTUpdates.emplace_back(DominatorTree::UpdateKind::Insert,
571 ForeBlocksLast.back(), SubLoopBlocksFirst[0]);
572 DTUpdates.emplace_back(DominatorTree::UpdateKind::Insert,
573 SubLoopBlocksLast.back(), AftBlocksFirst[0]);
574 DTU.applyUpdatesPermissive(DTUpdates);
575 }
576
577 // Merge adjacent basic blocks, if possible.
579 MergeBlocks.insert_range(ForeBlocksLast);
580 MergeBlocks.insert_range(SubLoopBlocksLast);
581 MergeBlocks.insert_range(AftBlocksLast);
582
583 MergeBlockSuccessorsIntoGivenBlocks(MergeBlocks, L, &DTU, LI);
584
585 // Apply updates to the DomTree.
586 DT = &DTU.getDomTree();
587
588 // At this point, the code is well formed. We now do a quick sweep over the
589 // inserted code, doing constant propagation and dead code elimination as we
590 // go.
591 simplifyLoopAfterUnroll(SubLoop, true, LI, SE, DT, AC, TTI);
592 simplifyLoopAfterUnroll(L, !CompletelyUnroll && Count > 1, LI, SE, DT, AC,
593 TTI);
594
595 NumCompletelyUnrolledAndJammed += CompletelyUnroll;
596 ++NumUnrolledAndJammed;
597
598 // Update LoopInfo if the loop is completely removed.
599 if (CompletelyUnroll)
600 LI->erase(L);
601
602#ifndef NDEBUG
603 // We shouldn't have done anything to break loop simplify form or LCSSA.
604 Loop *OutestLoop = SubLoop->getParentLoop()
605 ? SubLoop->getParentLoop()->getParentLoop()
606 ? SubLoop->getParentLoop()->getParentLoop()
607 : SubLoop->getParentLoop()
608 : SubLoop;
609 assert(DT->verify());
610 LI->verify(*DT);
611 assert(OutestLoop->isRecursivelyLCSSAForm(*DT, *LI));
612 if (!CompletelyUnroll)
613 assert(L->isLoopSimplifyForm());
614 assert(SubLoop->isLoopSimplifyForm());
615 SE->verify();
616#endif
617
618 return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled
620}
621
624 // Scan the BBs and collect legal loads and stores.
625 // Returns false if non-simple loads/stores are found.
626 for (BasicBlock *BB : Blocks) {
627 for (Instruction &I : *BB) {
628 if (auto *Ld = dyn_cast<LoadInst>(&I)) {
629 if (!Ld->isSimple())
630 return false;
631 MemInstr.push_back(&I);
632 } else if (auto *St = dyn_cast<StoreInst>(&I)) {
633 if (!St->isSimple())
634 return false;
635 MemInstr.push_back(&I);
636 } else if (I.mayReadOrWriteMemory()) {
637 return false;
638 }
639 }
640 }
641 return true;
642}
643
645 unsigned UnrollLevel, unsigned JamLevel,
646 bool Sequentialized, Dependence *D) {
647 // UnrollLevel might carry the dependency Src --> Dst
648 // Does a different loop after unrolling?
649 for (unsigned CurLoopDepth = UnrollLevel + 1; CurLoopDepth <= JamLevel;
650 ++CurLoopDepth) {
651 auto JammedDir = D->getDirection(CurLoopDepth);
652 if (JammedDir == Dependence::DVEntry::LT)
653 return true;
654
655 if (JammedDir & Dependence::DVEntry::GT)
656 return false;
657 }
658
659 return true;
660}
661
663 unsigned UnrollLevel, unsigned JamLevel,
664 bool Sequentialized, Dependence *D) {
665 // UnrollLevel might carry the dependency Dst --> Src
666 for (unsigned CurLoopDepth = UnrollLevel + 1; CurLoopDepth <= JamLevel;
667 ++CurLoopDepth) {
668 auto JammedDir = D->getDirection(CurLoopDepth);
669 if (JammedDir == Dependence::DVEntry::GT)
670 return true;
671
672 if (JammedDir & Dependence::DVEntry::LT)
673 return false;
674 }
675
676 // Backward dependencies are only preserved if not interleaved.
677 return Sequentialized;
678}
679
680// Check whether it is semantically safe Src and Dst considering any potential
681// dependency between them.
682//
683// @param UnrollLevel The level of the loop being unrolled
684// @param JamLevel The level of the loop being jammed; if Src and Dst are on
685// different levels, the outermost common loop counts as jammed level
686//
687// @return true if is safe and false if there is a dependency violation.
689 unsigned UnrollLevel, unsigned JamLevel,
690 bool Sequentialized, DependenceInfo &DI) {
691 assert(UnrollLevel <= JamLevel &&
692 "Expecting JamLevel to be at least UnrollLevel");
693
694 if (Src == Dst)
695 return true;
696 // Ignore Input dependencies.
697 if (isa<LoadInst>(Src) && isa<LoadInst>(Dst))
698 return true;
699
700 // Check whether unroll-and-jam may violate a dependency.
701 // By construction, every dependency will be lexicographically non-negative
702 // (if it was, it would violate the current execution order), such as
703 // (0,0,>,*,*)
704 // Unroll-and-jam changes the GT execution of two executions to the same
705 // iteration of the chosen unroll level. That is, a GT dependence becomes a GE
706 // dependence (or EQ, if we fully unrolled the loop) at the loop's position:
707 // (0,0,>=,*,*)
708 // Now, the dependency is not necessarily non-negative anymore, i.e.
709 // unroll-and-jam may violate correctness.
710 std::unique_ptr<Dependence> D = DI.depends(Src, Dst);
711 if (!D)
712 return true;
713 assert(D->isOrdered() && "Expected an output, flow or anti dep.");
714
715 if (D->isConfused()) {
716 LLVM_DEBUG(dbgs() << " Confused dependency between:\n"
717 << " " << *Src << "\n"
718 << " " << *Dst << "\n");
719 return false;
720 }
721
722 // If outer levels (levels enclosing the loop being unroll-and-jammed) have a
723 // non-equal direction, then the locations accessed in the inner levels cannot
724 // overlap in memory. We assumes the indexes never overlap into neighboring
725 // dimensions.
726 for (unsigned CurLoopDepth = 1; CurLoopDepth < UnrollLevel; ++CurLoopDepth)
727 if (!(D->getDirection(CurLoopDepth) & Dependence::DVEntry::EQ))
728 return true;
729
730 auto UnrollDirection = D->getDirection(UnrollLevel);
731
732 // If the distance carried by the unrolled loop is 0, then after unrolling
733 // that distance will become non-zero resulting in non-overlapping accesses in
734 // the inner loops.
735 if (UnrollDirection == Dependence::DVEntry::EQ)
736 return true;
737
738 if (UnrollDirection & Dependence::DVEntry::LT &&
739 !preservesForwardDependence(Src, Dst, UnrollLevel, JamLevel,
740 Sequentialized, D.get()))
741 return false;
742
743 if (UnrollDirection & Dependence::DVEntry::GT &&
744 !preservesBackwardDependence(Src, Dst, UnrollLevel, JamLevel,
745 Sequentialized, D.get()))
746 return false;
747
748 return true;
749}
750
751static bool
752checkDependencies(Loop &Root, const BasicBlockSet &SubLoopBlocks,
753 const DenseMap<Loop *, BasicBlockSet> &ForeBlocksMap,
754 const DenseMap<Loop *, BasicBlockSet> &AftBlocksMap,
755 DependenceInfo &DI, LoopInfo &LI) {
757 for (Loop *L : Root.getLoopsInPreorder())
758 if (ForeBlocksMap.contains(L))
759 AllBlocks.push_back(ForeBlocksMap.lookup(L));
760 AllBlocks.push_back(SubLoopBlocks);
761 for (Loop *L : Root.getLoopsInPreorder())
762 if (AftBlocksMap.contains(L))
763 AllBlocks.push_back(AftBlocksMap.lookup(L));
764
765 unsigned LoopDepth = Root.getLoopDepth();
766 SmallVector<Instruction *, 4> EarlierLoadsAndStores;
767 SmallVector<Instruction *, 4> CurrentLoadsAndStores;
768 for (BasicBlockSet &Blocks : AllBlocks) {
769 CurrentLoadsAndStores.clear();
770 if (!getLoadsAndStores(Blocks, CurrentLoadsAndStores))
771 return false;
772
773 Loop *CurLoop = LI.getLoopFor((*Blocks.begin())->front().getParent());
774 unsigned CurLoopDepth = CurLoop->getLoopDepth();
775
776 for (auto *Earlier : EarlierLoadsAndStores) {
777 Loop *EarlierLoop = LI.getLoopFor(Earlier->getParent());
778 unsigned EarlierDepth = EarlierLoop->getLoopDepth();
779 unsigned CommonLoopDepth = std::min(EarlierDepth, CurLoopDepth);
780 for (auto *Later : CurrentLoadsAndStores) {
781 if (!checkDependency(Earlier, Later, LoopDepth, CommonLoopDepth, false,
782 DI))
783 return false;
784 }
785 }
786
787 size_t NumInsts = CurrentLoadsAndStores.size();
788 for (size_t I = 0; I < NumInsts; ++I) {
789 for (size_t J = I; J < NumInsts; ++J) {
790 if (!checkDependency(CurrentLoadsAndStores[I], CurrentLoadsAndStores[J],
791 LoopDepth, CurLoopDepth, true, DI))
792 return false;
793 }
794 }
795
796 EarlierLoadsAndStores.append(CurrentLoadsAndStores.begin(),
797 CurrentLoadsAndStores.end());
798 }
799 return true;
800}
801
802static bool isEligibleLoopForm(const Loop &Root) {
803 // Root must have a child.
804 if (Root.getSubLoops().size() != 1)
805 return false;
806
807 const Loop *L = &Root;
808 do {
809 // All loops in Root need to be in simplify and rotated form.
810 if (!L->isLoopSimplifyForm())
811 return false;
812
813 if (!L->isRotatedForm())
814 return false;
815
816 if (L->getHeader()->hasAddressTaken()) {
817 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Address taken\n");
818 return false;
819 }
820
821 unsigned SubLoopsSize = L->getSubLoops().size();
822 if (SubLoopsSize == 0)
823 return true;
824
825 // Only one child is allowed.
826 if (SubLoopsSize != 1)
827 return false;
828
829 // Only loops with a single exit block can be unrolled and jammed.
830 // The function getExitBlock() is used for this check, rather than
831 // getUniqueExitBlock() to ensure loops with mulitple exit edges are
832 // disallowed.
833 if (!L->getExitBlock()) {
834 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; only loops with single exit "
835 "blocks can be unrolled and jammed.\n");
836 return false;
837 }
838
839 // Only loops with a single exiting block can be unrolled and jammed.
840 if (!L->getExitingBlock()) {
841 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; only loops with single "
842 "exiting blocks can be unrolled and jammed.\n");
843 return false;
844 }
845
846 L = L->getSubLoops()[0];
847 } while (L);
848
849 return true;
850}
851
853 while (!L->getSubLoops().empty())
854 L = L->getSubLoops()[0];
855 return L;
856}
857
859 DependenceInfo &DI, LoopInfo &LI) {
860 if (!isEligibleLoopForm(*L)) {
861 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Ineligible loop form\n");
862 return false;
863 }
864
865 /* We currently handle outer loops like this:
866 |
867 ForeFirst <------\ }
868 Blocks | } ForeBlocks of L
869 ForeLast | }
870 | |
871 ... |
872 | |
873 ForeFirst <----\ | }
874 Blocks | | } ForeBlocks of a inner loop of L
875 ForeLast | | }
876 | | |
877 JamLoopFirst <\ | | }
878 Blocks | | | } JamLoopBlocks of the innermost loop
879 JamLoopLast -/ | | }
880 | | |
881 AftFirst | | }
882 Blocks | | } AftBlocks of a inner loop of L
883 AftLast ------/ | }
884 | |
885 ... |
886 | |
887 AftFirst | }
888 Blocks | } AftBlocks of L
889 AftLast --------/ }
890 |
891
892 There are (theoretically) any number of blocks in ForeBlocks, SubLoopBlocks
893 and AftBlocks, providing that there is one edge from Fores to SubLoops,
894 one edge from SubLoops to Afts and a single outer loop exit (from Afts).
895 In practice we currently limit Aft blocks to a single block, and limit
896 things further in the profitablility checks of the unroll and jam pass.
897
898 Because of the way we rearrange basic blocks, we also require that
899 the Fore blocks of L on all unrolled iterations are safe to move before the
900 blocks of the direct child of L of all iterations. So we require that the
901 phi node looping operands of ForeHeader can be moved to at least the end of
902 ForeEnd, so that we can arrange cloned Fore Blocks before the subloop and
903 match up Phi's correctly.
904
905 i.e. The old order of blocks used to be
906 (F1)1 (F2)1 J1_1 J1_2 (A2)1 (A1)1 (F1)2 (F2)2 J2_1 J2_2 (A2)2 (A1)2.
907 It needs to be safe to transform this to
908 (F1)1 (F1)2 (F2)1 (F2)2 J1_1 J1_2 J2_1 J2_2 (A2)1 (A2)2 (A1)1 (A1)2.
909
910 There are then a number of checks along the lines of no calls, no
911 exceptions, inner loop IV is consistent, etc. Note that for loops requiring
912 runtime unrolling, UnrollRuntimeLoopRemainder can also fail in
913 UnrollAndJamLoop if the trip count cannot be easily calculated.
914 */
915
916 // Split blocks into Fore/SubLoop/Aft based on dominators
917 Loop *JamLoop = getInnerMostLoop(L);
918 BasicBlockSet SubLoopBlocks;
921 if (!partitionOuterLoopBlocks(*L, *JamLoop, SubLoopBlocks, ForeBlocksMap,
922 AftBlocksMap, DT)) {
923 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Incompatible loop layout\n");
924 return false;
925 }
926
927 // Aft blocks may need to move instructions to fore blocks, which becomes more
928 // difficult if there are multiple (potentially conditionally executed)
929 // blocks. For now we just exclude loops with multiple aft blocks.
930 if (AftBlocksMap[L].size() != 1) {
931 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Can't currently handle "
932 "multiple blocks after the loop\n");
933 return false;
934 }
935
936 // Check inner loop backedge count is consistent on all iterations of the
937 // outer loop
938 if (any_of(L->getLoopsInPreorder(), [&SE](Loop *SubLoop) {
939 return !hasIterationCountInvariantInParent(SubLoop, SE);
940 })) {
941 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Inner loop iteration count is "
942 "not consistent on each iteration\n");
943 return false;
944 }
945
946 // Check the loop safety info for exceptions.
949 if (LSI.anyBlockMayThrow()) {
950 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Something may throw\n");
951 return false;
952 }
953
954 // We've ruled out the easy stuff and now need to check that there are no
955 // interdependencies which may prevent us from moving the:
956 // ForeBlocks before Subloop and AftBlocks.
957 // Subloop before AftBlocks.
958 // ForeBlock phi operands before the subloop
959
960 // Make sure we can move all instructions we need to before the subloop
961 BasicBlock *Header = L->getHeader();
962 BasicBlock *Latch = L->getLoopLatch();
963 BasicBlockSet AftBlocks = AftBlocksMap[L];
964 Loop *SubLoop = L->getSubLoops()[0];
966 Header, Latch, AftBlocks, [&AftBlocks, &SubLoop](Instruction *I) {
967 if (SubLoop->contains(I->getParent()))
968 return false;
969 if (AftBlocks.count(I->getParent())) {
970 // If we hit a phi node in afts we know we are done (probably
971 // LCSSA)
972 if (isa<PHINode>(I))
973 return false;
974 // Can't move instructions with side effects or memory
975 // reads/writes
976 if (I->mayHaveSideEffects() || I->mayReadOrWriteMemory())
977 return false;
978 }
979 // Keep going
980 return true;
981 })) {
982 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; can't move required "
983 "instructions after subloop to before it\n");
984 return false;
985 }
986
987 // Check for memory dependencies which prohibit the unrolling we are doing.
988 // Because of the way we are unrolling Fore/Sub/Aft blocks, we need to check
989 // there are no dependencies between Fore-Sub, Fore-Aft, Sub-Aft and Sub-Sub.
990 if (!checkDependencies(*L, SubLoopBlocks, ForeBlocksMap, AftBlocksMap, DI,
991 LI)) {
992 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; failed dependency check\n");
993 return false;
994 }
995
996 return true;
997}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file defines the DenseMap class.
#define DEBUG_TYPE
This file defines a set of templates that efficiently compute a dominator tree over a generic graph.
SmallPtrSet< BasicBlock *, 4 > BasicBlockSet
static bool partitionLoopBlocks(Loop &L, BasicBlockSet &ForeBlocks, BasicBlockSet &AftBlocks, DominatorTree &DT)
static void moveHeaderPhiOperandsToForeBlocks(BasicBlock *Header, BasicBlock *Latch, BasicBlock::iterator InsertLoc, BasicBlockSet &AftBlocks)
static Loop * getInnerMostLoop(Loop *L)
static bool getLoadsAndStores(BasicBlockSet &Blocks, SmallVector< Instruction *, 4 > &MemInstr)
static bool preservesForwardDependence(Instruction *Src, Instruction *Dst, unsigned UnrollLevel, unsigned JamLevel, bool Sequentialized, Dependence *D)
static bool partitionOuterLoopBlocks(Loop &Root, Loop &JamLoop, BasicBlockSet &JamLoopBlocks, DenseMap< Loop *, BasicBlockSet > &ForeBlocksMap, DenseMap< Loop *, BasicBlockSet > &AftBlocksMap, DominatorTree &DT)
Partition blocks in a loop nest into blocks before and after each inner loop.
static bool isEligibleLoopForm(const Loop &Root)
static bool preservesBackwardDependence(Instruction *Src, Instruction *Dst, unsigned UnrollLevel, unsigned JamLevel, bool Sequentialized, Dependence *D)
static bool checkDependencies(Loop &Root, const BasicBlockSet &SubLoopBlocks, const DenseMap< Loop *, BasicBlockSet > &ForeBlocksMap, const DenseMap< Loop *, BasicBlockSet > &AftBlocksMap, DependenceInfo &DI, LoopInfo &LI)
static bool processHeaderPhiOperands(BasicBlock *Header, BasicBlock *Latch, BasicBlockSet &AftBlocks, T Visit)
static bool checkDependency(Instruction *Src, Instruction *Dst, unsigned UnrollLevel, unsigned JamLevel, bool Sequentialized, DependenceInfo &DI)
#define I(x, y, z)
Definition MD5.cpp:57
#define T
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
uint64_t IntrinsicInst * II
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
Conditional Branch instruction.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
BasicBlock * getSuccessor(unsigned i) const
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
DependenceInfo - This class is the main dependence-analysis driver.
LLVM_ABI std::unique_ptr< Dependence > depends(Instruction *Src, Instruction *Dst, bool UnderRuntimeAssumptions=false)
depends - Tests for a dependence between the Src and Dst instructions.
Dependence - This class represents a dependence between two memory references in a function.
DomTreeNodeBase * getIDom() const
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
DomTreeT & getDomTree()
Flush DomTree updates and return DomTree.
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
SmallVector< const LoopT *, 4 > getLoopsInPreorder() const
Return all loops in the loop nest rooted by the loop in preorder, with siblings in forward program or...
const std::vector< LoopT * > & getSubLoops() const
Return the loops contained entirely within this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
iterator_range< block_iterator > blocks() const
BlockT * getExitBlock() const
If getExitBlocks would return exactly one block, return that block.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
std::vector< BasicBlock * >::const_reverse_iterator RPOIterator
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
LLVM_ABI void erase(Loop *L)
Update LoopInfo after removing the last backedge from a loop.
Definition LoopInfo.cpp:908
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool isLoopSimplifyForm() const
Return true if the Loop is in the form that the LoopSimplify form transforms loops to,...
Definition LoopInfo.cpp:501
bool isRecursivelyLCSSAForm(const DominatorTree &DT, const LoopInfo &LI, bool IgnoreTokens=true) const
Return true if this Loop and all inner subloops are in LCSSA form.
Definition LoopInfo.cpp:491
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
The main scalar evolution driver.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void verify() const
Simple and conservative implementation of LoopSafetyInfo that can give false-positive answers to its ...
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
Unconditional Branch instruction.
static UncondBrInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
void setSuccessor(BasicBlock *NewSucc)
iterator begin()
Definition ValueMap.h:138
iterator end()
Definition ValueMap.h:139
ValueMapIteratorImpl< MapT, const Value *, false > iterator
Definition ValueMap.h:135
LLVM Value Representation.
Definition Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI bool isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT, DependenceInfo &DI, LoopInfo &LI)
LLVM_ABI void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, AAResults *AA=nullptr)
Perform some cleanup and simplifications on loops after unrolling.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
LLVM_ABI BasicBlock * CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap, const Twine &NameSuffix="", Function *F=nullptr, ClonedCodeInfo *CodeInfo=nullptr, bool MapAtoms=true)
Return a copy of the specified basic block, but without embedding the block into a particular functio...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
LLVM_ABI cl::opt< bool > EnableFSDiscriminator
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool MergeBlockSuccessorsIntoGivenBlocks(SmallPtrSetImpl< BasicBlock * > &MergeBlocks, Loop *L=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
Merge block(s) successors, if possible.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LoopUnrollResult
Represents the result of a UnrollLoop invocation.
Definition UnrollLoop.h:58
@ PartiallyUnrolled
The loop was partially unrolled – we still have a loop, but with a smaller trip count.
Definition UnrollLoop.h:65
@ Unmodified
The loop was not modified.
Definition UnrollLoop.h:60
@ FullyUnrolled
The loop was fully unrolled into straight-line code.
Definition UnrollLoop.h:69
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
TargetTransformInfo TTI
LLVM_ABI void remapInstructionsInBlocks(ArrayRef< BasicBlock * > Blocks, ValueToValueMapTy &VMap)
Remaps instructions in Blocks using the mapping in VMap.
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI const Loop * addClonedBlockToLoopInfo(BasicBlock *OriginalBB, BasicBlock *ClonedBB, LoopInfo *LI, NewLoopsMap &NewLoops)
Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary and adds a mapping from the o...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, bool AllowExpensiveTripCount, bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, bool PreserveLCSSA, unsigned SCEVExpansionBudget, bool RuntimeUnrollMultiExit, Loop **ResultLoop=nullptr, std::optional< unsigned > OriginalTripCount=std::nullopt, BranchProbability OriginalLoopProb=BranchProbability::getUnknown())
Insert code in the prolog/epilog code when unrolling a loop with a run-time trip-count.
LLVM_ABI LoopUnrollResult UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount, unsigned TripMultiple, bool UnrollRemainder, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, OptimizationRemarkEmitter *ORE, Loop **EpilogueLoop=nullptr)