LLVM 20.0.0git
BranchFolding.cpp
Go to the documentation of this file.
1//===- BranchFolding.cpp - Fold machine code branch instructions ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass forwards branches to unconditional branches to make them branch
10// directly to the target block. This pass often results in dead MBB's, which
11// it then removes.
12//
13// Note that this pass must be run after register allocation, it cannot handle
14// SSA form. It also must handle virtual registers for targets that emit virtual
15// ISA (e.g. NVPTX).
16//
17//===----------------------------------------------------------------------===//
18
19#include "BranchFolding.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/Statistic.h"
45#include "llvm/IR/DebugLoc.h"
46#include "llvm/IR/Function.h"
48#include "llvm/MC/LaneBitmask.h"
50#include "llvm/Pass.h"
54#include "llvm/Support/Debug.h"
58#include <cassert>
59#include <cstddef>
60#include <iterator>
61#include <numeric>
62
63using namespace llvm;
64
65#define DEBUG_TYPE "branch-folder"
66
67STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
68STATISTIC(NumBranchOpts, "Number of branches optimized");
69STATISTIC(NumTailMerge , "Number of block tails merged");
70STATISTIC(NumHoist , "Number of times common instructions are hoisted");
71STATISTIC(NumTailCalls, "Number of tail calls optimized");
72
75
76// Throttle for huge numbers of predecessors (compile speed problems)
78TailMergeThreshold("tail-merge-threshold",
79 cl::desc("Max number of predecessors to consider tail merging"),
80 cl::init(150), cl::Hidden);
81
82// Heuristic for tail merging (and, inversely, tail duplication).
84TailMergeSize("tail-merge-size",
85 cl::desc("Min number of instructions to consider tail merging"),
87
88namespace {
89
90 /// BranchFolderPass - Wrap branch folder in a machine function pass.
91 class BranchFolderPass : public MachineFunctionPass {
92 public:
93 static char ID;
94
95 explicit BranchFolderPass(): MachineFunctionPass(ID) {}
96
97 bool runOnMachineFunction(MachineFunction &MF) override;
98
99 void getAnalysisUsage(AnalysisUsage &AU) const override {
105 }
106
109 MachineFunctionProperties::Property::NoPHIs);
110 }
111 };
112
113} // end anonymous namespace
114
115char BranchFolderPass::ID = 0;
116
117char &llvm::BranchFolderPassID = BranchFolderPass::ID;
118
119INITIALIZE_PASS(BranchFolderPass, DEBUG_TYPE,
120 "Control Flow Optimizer", false, false)
121
122bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
123 if (skipFunction(MF.getFunction()))
124 return false;
125
126 TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
127 // TailMerge can create jump into if branches that make CFG irreducible for
128 // HW that requires structurized CFG.
129 bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
130 PassConfig->getEnableTailMerge();
131 MBFIWrapper MBBFreqInfo(
132 getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI());
133 BranchFolder Folder(
134 EnableTailMerge, /*CommonHoist=*/true, MBBFreqInfo,
135 getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI(),
136 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI());
137 return Folder.OptimizeFunction(MF, MF.getSubtarget().getInstrInfo(),
138 MF.getSubtarget().getRegisterInfo());
139}
140
141BranchFolder::BranchFolder(bool DefaultEnableTailMerge, bool CommonHoist,
142 MBFIWrapper &FreqInfo,
143 const MachineBranchProbabilityInfo &ProbInfo,
144 ProfileSummaryInfo *PSI, unsigned MinTailLength)
145 : EnableHoistCommonCode(CommonHoist), MinCommonTailLength(MinTailLength),
146 MBBFreqInfo(FreqInfo), MBPI(ProbInfo), PSI(PSI) {
147 switch (FlagEnableTailMerge) {
148 case cl::BOU_UNSET:
149 EnableTailMerge = DefaultEnableTailMerge;
150 break;
151 case cl::BOU_TRUE: EnableTailMerge = true; break;
152 case cl::BOU_FALSE: EnableTailMerge = false; break;
153 }
154}
155
156void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
157 assert(MBB->pred_empty() && "MBB must be dead!");
158 LLVM_DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
159
161 // drop all successors.
162 while (!MBB->succ_empty())
164
165 // Avoid matching if this pointer gets reused.
166 TriedMerging.erase(MBB);
167
168 // Update call site info.
169 for (const MachineInstr &MI : *MBB)
170 if (MI.shouldUpdateCallSiteInfo())
171 MF->eraseCallSiteInfo(&MI);
172
173 // Remove the block.
174 MF->erase(MBB);
175 EHScopeMembership.erase(MBB);
176 if (MLI)
177 MLI->removeBlock(MBB);
178}
179
181 const TargetInstrInfo *tii,
182 const TargetRegisterInfo *tri,
183 MachineLoopInfo *mli, bool AfterPlacement) {
184 if (!tii) return false;
185
186 TriedMerging.clear();
187
189 AfterBlockPlacement = AfterPlacement;
190 TII = tii;
191 TRI = tri;
192 MLI = mli;
193 this->MRI = &MRI;
194
195 if (MinCommonTailLength == 0) {
196 MinCommonTailLength = TailMergeSize.getNumOccurrences() > 0
198 : TII->getTailMergeSize(MF);
199 }
200
201 UpdateLiveIns = MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF);
202 if (!UpdateLiveIns)
203 MRI.invalidateLiveness();
204
205 bool MadeChange = false;
206
207 // Recalculate EH scope membership.
208 EHScopeMembership = getEHScopeMembership(MF);
209
210 bool MadeChangeThisIteration = true;
211 while (MadeChangeThisIteration) {
212 MadeChangeThisIteration = TailMergeBlocks(MF);
213 // No need to clean up if tail merging does not change anything after the
214 // block placement.
215 if (!AfterBlockPlacement || MadeChangeThisIteration)
216 MadeChangeThisIteration |= OptimizeBranches(MF);
217 if (EnableHoistCommonCode)
218 MadeChangeThisIteration |= HoistCommonCode(MF);
219 MadeChange |= MadeChangeThisIteration;
220 }
221
222 // See if any jump tables have become dead as the code generator
223 // did its thing.
225 if (!JTI)
226 return MadeChange;
227
228 // Walk the function to find jump tables that are live.
229 BitVector JTIsLive(JTI->getJumpTables().size());
230 for (const MachineBasicBlock &BB : MF) {
231 for (const MachineInstr &I : BB)
232 for (const MachineOperand &Op : I.operands()) {
233 if (!Op.isJTI()) continue;
234
235 // Remember that this JT is live.
236 JTIsLive.set(Op.getIndex());
237 }
238 }
239
240 // Finally, remove dead jump tables. This happens when the
241 // indirect jump was unreachable (and thus deleted).
242 for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
243 if (!JTIsLive.test(i)) {
244 JTI->RemoveJumpTable(i);
245 MadeChange = true;
246 }
247
248 return MadeChange;
249}
250
251//===----------------------------------------------------------------------===//
252// Tail Merging of Blocks
253//===----------------------------------------------------------------------===//
254
255/// HashMachineInstr - Compute a hash value for MI and its operands.
256static unsigned HashMachineInstr(const MachineInstr &MI) {
257 unsigned Hash = MI.getOpcode();
258 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
259 const MachineOperand &Op = MI.getOperand(i);
260
261 // Merge in bits from the operand if easy. We can't use MachineOperand's
262 // hash_code here because it's not deterministic and we sort by hash value
263 // later.
264 unsigned OperandHash = 0;
265 switch (Op.getType()) {
267 OperandHash = Op.getReg();
268 break;
270 OperandHash = Op.getImm();
271 break;
273 OperandHash = Op.getMBB()->getNumber();
274 break;
278 OperandHash = Op.getIndex();
279 break;
282 // Global address / external symbol are too hard, don't bother, but do
283 // pull in the offset.
284 OperandHash = Op.getOffset();
285 break;
286 default:
287 break;
288 }
289
290 Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
291 }
292 return Hash;
293}
294
295/// HashEndOfMBB - Hash the last instruction in the MBB.
296static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) {
298 if (I == MBB.end())
299 return 0;
300
301 return HashMachineInstr(*I);
302}
303
304/// Whether MI should be counted as an instruction when calculating common tail.
306 return !(MI.isDebugInstr() || MI.isCFIInstruction());
307}
308
309/// Iterate backwards from the given iterator \p I, towards the beginning of the
310/// block. If a MI satisfying 'countsAsInstruction' is found, return an iterator
311/// pointing to that MI. If no such MI is found, return the end iterator.
315 while (I != MBB->begin()) {
316 --I;
318 return I;
319 }
320 return MBB->end();
321}
322
323/// Given two machine basic blocks, return the number of instructions they
324/// actually have in common together at their end. If a common tail is found (at
325/// least by one instruction), then iterators for the first shared instruction
326/// in each block are returned as well.
327///
328/// Non-instructions according to countsAsInstruction are ignored.
330 MachineBasicBlock *MBB2,
333 MachineBasicBlock::iterator MBBI1 = MBB1->end();
334 MachineBasicBlock::iterator MBBI2 = MBB2->end();
335
336 unsigned TailLen = 0;
337 while (true) {
338 MBBI1 = skipBackwardPastNonInstructions(MBBI1, MBB1);
339 MBBI2 = skipBackwardPastNonInstructions(MBBI2, MBB2);
340 if (MBBI1 == MBB1->end() || MBBI2 == MBB2->end())
341 break;
342 if (!MBBI1->isIdenticalTo(*MBBI2) ||
343 // FIXME: This check is dubious. It's used to get around a problem where
344 // people incorrectly expect inline asm directives to remain in the same
345 // relative order. This is untenable because normal compiler
346 // optimizations (like this one) may reorder and/or merge these
347 // directives.
348 MBBI1->isInlineAsm()) {
349 break;
350 }
351 if (MBBI1->getFlag(MachineInstr::NoMerge) ||
352 MBBI2->getFlag(MachineInstr::NoMerge))
353 break;
354 ++TailLen;
355 I1 = MBBI1;
356 I2 = MBBI2;
357 }
358
359 return TailLen;
360}
361
362void BranchFolder::replaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
363 MachineBasicBlock &NewDest) {
364 if (UpdateLiveIns) {
365 // OldInst should always point to an instruction.
366 MachineBasicBlock &OldMBB = *OldInst->getParent();
367 LiveRegs.clear();
368 LiveRegs.addLiveOuts(OldMBB);
369 // Move backward to the place where will insert the jump.
371 do {
372 --I;
373 LiveRegs.stepBackward(*I);
374 } while (I != OldInst);
375
376 // Merging the tails may have switched some undef operand to non-undef ones.
377 // Add IMPLICIT_DEFS into OldMBB as necessary to have a definition of the
378 // register.
380 // We computed the liveins with computeLiveIn earlier and should only see
381 // full registers:
382 assert(P.LaneMask == LaneBitmask::getAll() &&
383 "Can only handle full register.");
384 MCPhysReg Reg = P.PhysReg;
385 if (!LiveRegs.available(*MRI, Reg))
386 continue;
387 DebugLoc DL;
388 BuildMI(OldMBB, OldInst, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Reg);
389 }
390 }
391
392 TII->ReplaceTailWithBranchTo(OldInst, &NewDest);
393 ++NumTailMerge;
394}
395
396MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
398 const BasicBlock *BB) {
399 if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
400 return nullptr;
401
402 MachineFunction &MF = *CurMBB.getParent();
403
404 // Create the fall-through block.
407 CurMBB.getParent()->insert(++MBBI, NewMBB);
408
409 // Move all the successors of this block to the specified block.
410 NewMBB->transferSuccessors(&CurMBB);
411
412 // Add an edge from CurMBB to NewMBB for the fall-through.
413 CurMBB.addSuccessor(NewMBB);
414
415 // Splice the code over.
416 NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());
417
418 // NewMBB belongs to the same loop as CurMBB.
419 if (MLI)
420 if (MachineLoop *ML = MLI->getLoopFor(&CurMBB))
421 ML->addBasicBlockToLoop(NewMBB, *MLI);
422
423 // NewMBB inherits CurMBB's block frequency.
424 MBBFreqInfo.setBlockFreq(NewMBB, MBBFreqInfo.getBlockFreq(&CurMBB));
425
426 if (UpdateLiveIns)
427 computeAndAddLiveIns(LiveRegs, *NewMBB);
428
429 // Add the new block to the EH scope.
430 const auto &EHScopeI = EHScopeMembership.find(&CurMBB);
431 if (EHScopeI != EHScopeMembership.end()) {
432 auto n = EHScopeI->second;
433 EHScopeMembership[NewMBB] = n;
434 }
435
436 return NewMBB;
437}
438
439/// EstimateRuntime - Make a rough estimate for how long it will take to run
440/// the specified code.
443 unsigned Time = 0;
444 for (; I != E; ++I) {
445 if (!countsAsInstruction(*I))
446 continue;
447 if (I->isCall())
448 Time += 10;
449 else if (I->mayLoadOrStore())
450 Time += 2;
451 else
452 ++Time;
453 }
454 return Time;
455}
456
457// CurMBB needs to add an unconditional branch to SuccMBB (we removed these
458// branches temporarily for tail merging). In the case where CurMBB ends
459// with a conditional branch to the next block, optimize by reversing the
460// test and conditionally branching to SuccMBB instead.
461static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
462 const TargetInstrInfo *TII, const DebugLoc &BranchDL) {
463 MachineFunction *MF = CurMBB->getParent();
465 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
467 DebugLoc dl = CurMBB->findBranchDebugLoc();
468 if (!dl)
469 dl = BranchDL;
470 if (I != MF->end() && !TII->analyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
471 MachineBasicBlock *NextBB = &*I;
472 if (TBB == NextBB && !Cond.empty() && !FBB) {
474 TII->removeBranch(*CurMBB);
475 TII->insertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
476 return;
477 }
478 }
479 }
480 TII->insertBranch(*CurMBB, SuccBB, nullptr,
482}
483
484bool
485BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
486 if (getHash() < o.getHash())
487 return true;
488 if (getHash() > o.getHash())
489 return false;
490 if (getBlock()->getNumber() < o.getBlock()->getNumber())
491 return true;
492 if (getBlock()->getNumber() > o.getBlock()->getNumber())
493 return false;
494 return false;
495}
496
497/// CountTerminators - Count the number of terminators in the given
498/// block and set I to the position of the first non-terminator, if there
499/// is one, or MBB->end() otherwise.
502 I = MBB->end();
503 unsigned NumTerms = 0;
504 while (true) {
505 if (I == MBB->begin()) {
506 I = MBB->end();
507 break;
508 }
509 --I;
510 if (!I->isTerminator()) break;
511 ++NumTerms;
512 }
513 return NumTerms;
514}
515
516/// A no successor, non-return block probably ends in unreachable and is cold.
517/// Also consider a block that ends in an indirect branch to be a return block,
518/// since many targets use plain indirect branches to return.
520 if (!MBB->succ_empty())
521 return false;
522 if (MBB->empty())
523 return true;
524 return !(MBB->back().isReturn() || MBB->back().isIndirectBranch());
525}
526
527/// ProfitableToMerge - Check if two machine basic blocks have a common tail
528/// and decide if it would be profitable to merge those tails. Return the
529/// length of the common tail and iterators to the first common instruction
530/// in each block.
531/// MBB1, MBB2 The blocks to check
532/// MinCommonTailLength Minimum size of tail block to be merged.
533/// CommonTailLen Out parameter to record the size of the shared tail between
534/// MBB1 and MBB2
535/// I1, I2 Iterator references that will be changed to point to the first
536/// instruction in the common tail shared by MBB1,MBB2
537/// SuccBB A common successor of MBB1, MBB2 which are in a canonical form
538/// relative to SuccBB
539/// PredBB The layout predecessor of SuccBB, if any.
540/// EHScopeMembership map from block to EH scope #.
541/// AfterPlacement True if we are merging blocks after layout. Stricter
542/// thresholds apply to prevent undoing tail-duplication.
543static bool
545 unsigned MinCommonTailLength, unsigned &CommonTailLen,
548 MachineBasicBlock *PredBB,
550 bool AfterPlacement,
551 MBFIWrapper &MBBFreqInfo,
552 ProfileSummaryInfo *PSI) {
553 // It is never profitable to tail-merge blocks from two different EH scopes.
554 if (!EHScopeMembership.empty()) {
555 auto EHScope1 = EHScopeMembership.find(MBB1);
556 assert(EHScope1 != EHScopeMembership.end());
557 auto EHScope2 = EHScopeMembership.find(MBB2);
558 assert(EHScope2 != EHScopeMembership.end());
559 if (EHScope1->second != EHScope2->second)
560 return false;
561 }
562
563 CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
564 if (CommonTailLen == 0)
565 return false;
566 LLVM_DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1)
567 << " and " << printMBBReference(*MBB2) << " is "
568 << CommonTailLen << '\n');
569
570 // Move the iterators to the beginning of the MBB if we only got debug
571 // instructions before the tail. This is to avoid splitting a block when we
572 // only got debug instructions before the tail (to be invariant on -g).
573 if (skipDebugInstructionsForward(MBB1->begin(), MBB1->end(), false) == I1)
574 I1 = MBB1->begin();
575 if (skipDebugInstructionsForward(MBB2->begin(), MBB2->end(), false) == I2)
576 I2 = MBB2->begin();
577
578 bool FullBlockTail1 = I1 == MBB1->begin();
579 bool FullBlockTail2 = I2 == MBB2->begin();
580
581 // It's almost always profitable to merge any number of non-terminator
582 // instructions with the block that falls through into the common successor.
583 // This is true only for a single successor. For multiple successors, we are
584 // trading a conditional branch for an unconditional one.
585 // TODO: Re-visit successor size for non-layout tail merging.
586 if ((MBB1 == PredBB || MBB2 == PredBB) &&
587 (!AfterPlacement || MBB1->succ_size() == 1)) {
589 unsigned NumTerms = CountTerminators(MBB1 == PredBB ? MBB2 : MBB1, I);
590 if (CommonTailLen > NumTerms)
591 return true;
592 }
593
594 // If these are identical non-return blocks with no successors, merge them.
595 // Such blocks are typically cold calls to noreturn functions like abort, and
596 // are unlikely to become a fallthrough target after machine block placement.
597 // Tail merging these blocks is unlikely to create additional unconditional
598 // branches, and will reduce the size of this cold code.
599 if (FullBlockTail1 && FullBlockTail2 &&
601 return true;
602
603 // If one of the blocks can be completely merged and happens to be in
604 // a position where the other could fall through into it, merge any number
605 // of instructions, because it can be done without a branch.
606 // TODO: If the blocks are not adjacent, move one of them so that they are?
607 if (MBB1->isLayoutSuccessor(MBB2) && FullBlockTail2)
608 return true;
609 if (MBB2->isLayoutSuccessor(MBB1) && FullBlockTail1)
610 return true;
611
612 // If both blocks are identical and end in a branch, merge them unless they
613 // both have a fallthrough predecessor and successor.
614 // We can only do this after block placement because it depends on whether
615 // there are fallthroughs, and we don't know until after layout.
616 if (AfterPlacement && FullBlockTail1 && FullBlockTail2) {
617 auto BothFallThrough = [](MachineBasicBlock *MBB) {
618 if (!MBB->succ_empty() && !MBB->canFallThrough())
619 return false;
622 return (MBB != &*MF->begin()) && std::prev(I)->canFallThrough();
623 };
624 if (!BothFallThrough(MBB1) || !BothFallThrough(MBB2))
625 return true;
626 }
627
628 // If both blocks have an unconditional branch temporarily stripped out,
629 // count that as an additional common instruction for the following
630 // heuristics. This heuristic is only accurate for single-succ blocks, so to
631 // make sure that during layout merging and duplicating don't crash, we check
632 // for that when merging during layout.
633 unsigned EffectiveTailLen = CommonTailLen;
634 if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
635 (MBB1->succ_size() == 1 || !AfterPlacement) &&
636 !MBB1->back().isBarrier() &&
637 !MBB2->back().isBarrier())
638 ++EffectiveTailLen;
639
640 // Check if the common tail is long enough to be worthwhile.
641 if (EffectiveTailLen >= MinCommonTailLength)
642 return true;
643
644 // If we are optimizing for code size, 2 instructions in common is enough if
645 // we don't have to split a block. At worst we will be introducing 1 new
646 // branch instruction, which is likely to be smaller than the 2
647 // instructions that would be deleted in the merge.
648 bool OptForSize = llvm::shouldOptimizeForSize(MBB1, PSI, &MBBFreqInfo) &&
649 llvm::shouldOptimizeForSize(MBB2, PSI, &MBBFreqInfo);
650 return EffectiveTailLen >= 2 && OptForSize &&
651 (FullBlockTail1 || FullBlockTail2);
652}
653
654unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
655 unsigned MinCommonTailLength,
656 MachineBasicBlock *SuccBB,
657 MachineBasicBlock *PredBB) {
658 unsigned maxCommonTailLength = 0U;
659 SameTails.clear();
660 MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
661 MPIterator HighestMPIter = std::prev(MergePotentials.end());
662 for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
663 B = MergePotentials.begin();
664 CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
665 for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
666 unsigned CommonTailLen;
667 if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
668 MinCommonTailLength,
669 CommonTailLen, TrialBBI1, TrialBBI2,
670 SuccBB, PredBB,
671 EHScopeMembership,
672 AfterBlockPlacement, MBBFreqInfo, PSI)) {
673 if (CommonTailLen > maxCommonTailLength) {
674 SameTails.clear();
675 maxCommonTailLength = CommonTailLen;
676 HighestMPIter = CurMPIter;
677 SameTails.push_back(SameTailElt(CurMPIter, TrialBBI1));
678 }
679 if (HighestMPIter == CurMPIter &&
680 CommonTailLen == maxCommonTailLength)
681 SameTails.push_back(SameTailElt(I, TrialBBI2));
682 }
683 if (I == B)
684 break;
685 }
686 }
687 return maxCommonTailLength;
688}
689
690void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
691 MachineBasicBlock *SuccBB,
692 MachineBasicBlock *PredBB,
693 const DebugLoc &BranchDL) {
694 MPIterator CurMPIter, B;
695 for (CurMPIter = std::prev(MergePotentials.end()),
696 B = MergePotentials.begin();
697 CurMPIter->getHash() == CurHash; --CurMPIter) {
698 // Put the unconditional branch back, if we need one.
699 MachineBasicBlock *CurMBB = CurMPIter->getBlock();
700 if (SuccBB && CurMBB != PredBB)
701 FixTail(CurMBB, SuccBB, TII, BranchDL);
702 if (CurMPIter == B)
703 break;
704 }
705 if (CurMPIter->getHash() != CurHash)
706 CurMPIter++;
707 MergePotentials.erase(CurMPIter, MergePotentials.end());
708}
709
710bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
711 MachineBasicBlock *SuccBB,
712 unsigned maxCommonTailLength,
713 unsigned &commonTailIndex) {
714 commonTailIndex = 0;
715 unsigned TimeEstimate = ~0U;
716 for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
717 // Use PredBB if possible; that doesn't require a new branch.
718 if (SameTails[i].getBlock() == PredBB) {
719 commonTailIndex = i;
720 break;
721 }
722 // Otherwise, make a (fairly bogus) choice based on estimate of
723 // how long it will take the various blocks to execute.
724 unsigned t = EstimateRuntime(SameTails[i].getBlock()->begin(),
725 SameTails[i].getTailStartPos());
726 if (t <= TimeEstimate) {
727 TimeEstimate = t;
728 commonTailIndex = i;
729 }
730 }
731
733 SameTails[commonTailIndex].getTailStartPos();
734 MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
735
736 LLVM_DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size "
737 << maxCommonTailLength);
738
739 // If the split block unconditionally falls-thru to SuccBB, it will be
740 // merged. In control flow terms it should then take SuccBB's name. e.g. If
741 // SuccBB is an inner loop, the common tail is still part of the inner loop.
742 const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ?
743 SuccBB->getBasicBlock() : MBB->getBasicBlock();
744 MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
745 if (!newMBB) {
746 LLVM_DEBUG(dbgs() << "... failed!");
747 return false;
748 }
749
750 SameTails[commonTailIndex].setBlock(newMBB);
751 SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
752
753 // If we split PredBB, newMBB is the new predecessor.
754 if (PredBB == MBB)
755 PredBB = newMBB;
756
757 return true;
758}
759
760static void
762 MachineBasicBlock &MBBCommon) {
763 MachineBasicBlock *MBB = MBBIStartPos->getParent();
764 // Note CommonTailLen does not necessarily matches the size of
765 // the common BB nor all its instructions because of debug
766 // instructions differences.
767 unsigned CommonTailLen = 0;
768 for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos)
769 ++CommonTailLen;
770
773 MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin();
774 MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend();
775
776 while (CommonTailLen--) {
777 assert(MBBI != MBBIE && "Reached BB end within common tail length!");
778 (void)MBBIE;
779
780 if (!countsAsInstruction(*MBBI)) {
781 ++MBBI;
782 continue;
783 }
784
785 while ((MBBICommon != MBBIECommon) && !countsAsInstruction(*MBBICommon))
786 ++MBBICommon;
787
788 assert(MBBICommon != MBBIECommon &&
789 "Reached BB end within common tail length!");
790 assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");
791
792 // Merge MMOs from memory operations in the common block.
793 if (MBBICommon->mayLoadOrStore())
794 MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
795 // Drop undef flags if they aren't present in all merged instructions.
796 for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
797 MachineOperand &MO = MBBICommon->getOperand(I);
798 if (MO.isReg() && MO.isUndef()) {
799 const MachineOperand &OtherMO = MBBI->getOperand(I);
800 if (!OtherMO.isUndef())
801 MO.setIsUndef(false);
802 }
803 }
804
805 ++MBBI;
806 ++MBBICommon;
807 }
808}
809
810void BranchFolder::mergeCommonTails(unsigned commonTailIndex) {
811 MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
812
813 std::vector<MachineBasicBlock::iterator> NextCommonInsts(SameTails.size());
814 for (unsigned int i = 0 ; i != SameTails.size() ; ++i) {
815 if (i != commonTailIndex) {
816 NextCommonInsts[i] = SameTails[i].getTailStartPos();
817 mergeOperations(SameTails[i].getTailStartPos(), *MBB);
818 } else {
819 assert(SameTails[i].getTailStartPos() == MBB->begin() &&
820 "MBB is not a common tail only block");
821 }
822 }
823
824 for (auto &MI : *MBB) {
826 continue;
827 DebugLoc DL = MI.getDebugLoc();
828 for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) {
829 if (i == commonTailIndex)
830 continue;
831
832 auto &Pos = NextCommonInsts[i];
833 assert(Pos != SameTails[i].getBlock()->end() &&
834 "Reached BB end within common tail");
835 while (!countsAsInstruction(*Pos)) {
836 ++Pos;
837 assert(Pos != SameTails[i].getBlock()->end() &&
838 "Reached BB end within common tail");
839 }
840 assert(MI.isIdenticalTo(*Pos) && "Expected matching MIIs!");
841 DL = DILocation::getMergedLocation(DL, Pos->getDebugLoc());
842 NextCommonInsts[i] = ++Pos;
843 }
844 MI.setDebugLoc(DL);
845 }
846
847 if (UpdateLiveIns) {
848 LivePhysRegs NewLiveIns(*TRI);
849 computeLiveIns(NewLiveIns, *MBB);
850 LiveRegs.init(*TRI);
851
852 // The flag merging may lead to some register uses no longer using the
853 // <undef> flag, add IMPLICIT_DEFs in the predecessors as necessary.
854 for (MachineBasicBlock *Pred : MBB->predecessors()) {
855 LiveRegs.clear();
856 LiveRegs.addLiveOuts(*Pred);
857 MachineBasicBlock::iterator InsertBefore = Pred->getFirstTerminator();
858 for (Register Reg : NewLiveIns) {
859 if (!LiveRegs.available(*MRI, Reg))
860 continue;
861
862 // Skip the register if we are about to add one of its super registers.
863 // TODO: Common this up with the same logic in addLineIns().
864 if (any_of(TRI->superregs(Reg), [&](MCPhysReg SReg) {
865 return NewLiveIns.contains(SReg) && !MRI->isReserved(SReg);
866 }))
867 continue;
868
869 DebugLoc DL;
870 BuildMI(*Pred, InsertBefore, DL, TII->get(TargetOpcode::IMPLICIT_DEF),
871 Reg);
872 }
873 }
874
875 MBB->clearLiveIns();
876 addLiveIns(*MBB, NewLiveIns);
877 }
878}
879
880// See if any of the blocks in MergePotentials (which all have SuccBB as a
881// successor, or all have no successor if it is null) can be tail-merged.
882// If there is a successor, any blocks in MergePotentials that are not
883// tail-merged and are not immediately before Succ must have an unconditional
884// branch to Succ added (but the predecessor/successor lists need no
885// adjustment). The lone predecessor of Succ that falls through into Succ,
886// if any, is given in PredBB.
887// MinCommonTailLength - Except for the special cases below, tail-merge if
888// there are at least this many instructions in common.
889bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
890 MachineBasicBlock *PredBB,
891 unsigned MinCommonTailLength) {
892 bool MadeChange = false;
893
894 LLVM_DEBUG({
895 dbgs() << "\nTryTailMergeBlocks: ";
896 for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
897 dbgs() << printMBBReference(*MergePotentials[i].getBlock())
898 << (i == e - 1 ? "" : ", ");
899 dbgs() << "\n";
900 if (SuccBB) {
901 dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n';
902 if (PredBB)
903 dbgs() << " which has fall-through from " << printMBBReference(*PredBB)
904 << "\n";
905 }
906 dbgs() << "Looking for common tails of at least " << MinCommonTailLength
907 << " instruction" << (MinCommonTailLength == 1 ? "" : "s") << '\n';
908 });
909
910 // Sort by hash value so that blocks with identical end sequences sort
911 // together.
912 array_pod_sort(MergePotentials.begin(), MergePotentials.end());
913
914 // Walk through equivalence sets looking for actual exact matches.
915 while (MergePotentials.size() > 1) {
916 unsigned CurHash = MergePotentials.back().getHash();
917 const DebugLoc &BranchDL = MergePotentials.back().getBranchDebugLoc();
918
919 // Build SameTails, identifying the set of blocks with this hash code
920 // and with the maximum number of instructions in common.
921 unsigned maxCommonTailLength = ComputeSameTails(CurHash,
922 MinCommonTailLength,
923 SuccBB, PredBB);
924
925 // If we didn't find any pair that has at least MinCommonTailLength
926 // instructions in common, remove all blocks with this hash code and retry.
927 if (SameTails.empty()) {
928 RemoveBlocksWithHash(CurHash, SuccBB, PredBB, BranchDL);
929 continue;
930 }
931
932 // If one of the blocks is the entire common tail (and is not the entry
933 // block/an EH pad, which we can't jump to), we can treat all blocks with
934 // this same tail at once. Use PredBB if that is one of the possibilities,
935 // as that will not introduce any extra branches.
936 MachineBasicBlock *EntryBB =
937 &MergePotentials.front().getBlock()->getParent()->front();
938 unsigned commonTailIndex = SameTails.size();
939 // If there are two blocks, check to see if one can be made to fall through
940 // into the other.
941 if (SameTails.size() == 2 &&
942 SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
943 SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad())
944 commonTailIndex = 1;
945 else if (SameTails.size() == 2 &&
946 SameTails[1].getBlock()->isLayoutSuccessor(
947 SameTails[0].getBlock()) &&
948 SameTails[0].tailIsWholeBlock() &&
949 !SameTails[0].getBlock()->isEHPad())
950 commonTailIndex = 0;
951 else {
952 // Otherwise just pick one, favoring the fall-through predecessor if
953 // there is one.
954 for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
955 MachineBasicBlock *MBB = SameTails[i].getBlock();
956 if ((MBB == EntryBB || MBB->isEHPad()) &&
957 SameTails[i].tailIsWholeBlock())
958 continue;
959 if (MBB == PredBB) {
960 commonTailIndex = i;
961 break;
962 }
963 if (SameTails[i].tailIsWholeBlock())
964 commonTailIndex = i;
965 }
966 }
967
968 if (commonTailIndex == SameTails.size() ||
969 (SameTails[commonTailIndex].getBlock() == PredBB &&
970 !SameTails[commonTailIndex].tailIsWholeBlock())) {
971 // None of the blocks consist entirely of the common tail.
972 // Split a block so that one does.
973 if (!CreateCommonTailOnlyBlock(PredBB, SuccBB,
974 maxCommonTailLength, commonTailIndex)) {
975 RemoveBlocksWithHash(CurHash, SuccBB, PredBB, BranchDL);
976 continue;
977 }
978 }
979
980 MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
981
982 // Recompute common tail MBB's edge weights and block frequency.
983 setCommonTailEdgeWeights(*MBB);
984
985 // Merge debug locations, MMOs and undef flags across identical instructions
986 // for common tail.
987 mergeCommonTails(commonTailIndex);
988
989 // MBB is common tail. Adjust all other BB's to jump to this one.
990 // Traversal must be forwards so erases work.
991 LLVM_DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB)
992 << " for ");
993 for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
994 if (commonTailIndex == i)
995 continue;
996 LLVM_DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock())
997 << (i == e - 1 ? "" : ", "));
998 // Hack the end off BB i, making it jump to BB commonTailIndex instead.
999 replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB);
1000 // BB i is no longer a predecessor of SuccBB; remove it from the worklist.
1001 MergePotentials.erase(SameTails[i].getMPIter());
1002 }
1003 LLVM_DEBUG(dbgs() << "\n");
1004 // We leave commonTailIndex in the worklist in case there are other blocks
1005 // that match it with a smaller number of instructions.
1006 MadeChange = true;
1007 }
1008 return MadeChange;
1009}
1010
1011bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
1012 bool MadeChange = false;
1013 if (!EnableTailMerge)
1014 return MadeChange;
1015
1016 // First find blocks with no successors.
1017 // Block placement may create new tail merging opportunities for these blocks.
1018 MergePotentials.clear();
1019 for (MachineBasicBlock &MBB : MF) {
1020 if (MergePotentials.size() == TailMergeThreshold)
1021 break;
1022 if (!TriedMerging.count(&MBB) && MBB.succ_empty())
1023 MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB,
1025 }
1026
1027 // If this is a large problem, avoid visiting the same basic blocks
1028 // multiple times.
1029 if (MergePotentials.size() == TailMergeThreshold)
1030 for (const MergePotentialsElt &Elt : MergePotentials)
1031 TriedMerging.insert(Elt.getBlock());
1032
1033 // See if we can do any tail merging on those.
1034 if (MergePotentials.size() >= 2)
1035 MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
1036
1037 // Look at blocks (IBB) with multiple predecessors (PBB).
1038 // We change each predecessor to a canonical form, by
1039 // (1) temporarily removing any unconditional branch from the predecessor
1040 // to IBB, and
1041 // (2) alter conditional branches so they branch to the other block
1042 // not IBB; this may require adding back an unconditional branch to IBB
1043 // later, where there wasn't one coming in. E.g.
1044 // Bcc IBB
1045 // fallthrough to QBB
1046 // here becomes
1047 // Bncc QBB
1048 // with a conceptual B to IBB after that, which never actually exists.
1049 // With those changes, we see whether the predecessors' tails match,
1050 // and merge them if so. We change things out of canonical form and
1051 // back to the way they were later in the process. (OptimizeBranches
1052 // would undo some of this, but we can't use it, because we'd get into
1053 // a compile-time infinite loop repeatedly doing and undoing the same
1054 // transformations.)
1055
1056 for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
1057 I != E; ++I) {
1058 if (I->pred_size() < 2) continue;
1060 MachineBasicBlock *IBB = &*I;
1061 MachineBasicBlock *PredBB = &*std::prev(I);
1062 MergePotentials.clear();
1063 MachineLoop *ML;
1064
1065 // Bail if merging after placement and IBB is the loop header because
1066 // -- If merging predecessors that belong to the same loop as IBB, the
1067 // common tail of merged predecessors may become the loop top if block
1068 // placement is called again and the predecessors may branch to this common
1069 // tail and require more branches. This can be relaxed if
1070 // MachineBlockPlacement::findBestLoopTop is more flexible.
1071 // --If merging predecessors that do not belong to the same loop as IBB, the
1072 // loop info of IBB's loop and the other loops may be affected. Calling the
1073 // block placement again may make big change to the layout and eliminate the
1074 // reason to do tail merging here.
1075 if (AfterBlockPlacement && MLI) {
1076 ML = MLI->getLoopFor(IBB);
1077 if (ML && IBB == ML->getHeader())
1078 continue;
1079 }
1080
1081 for (MachineBasicBlock *PBB : I->predecessors()) {
1082 if (MergePotentials.size() == TailMergeThreshold)
1083 break;
1084
1085 if (TriedMerging.count(PBB))
1086 continue;
1087
1088 // Skip blocks that loop to themselves, can't tail merge these.
1089 if (PBB == IBB)
1090 continue;
1091
1092 // Visit each predecessor only once.
1093 if (!UniquePreds.insert(PBB).second)
1094 continue;
1095
1096 // Skip blocks which may jump to a landing pad or jump from an asm blob.
1097 // Can't tail merge these.
1098 if (PBB->hasEHPadSuccessor() || PBB->mayHaveInlineAsmBr())
1099 continue;
1100
1101 // After block placement, only consider predecessors that belong to the
1102 // same loop as IBB. The reason is the same as above when skipping loop
1103 // header.
1104 if (AfterBlockPlacement && MLI)
1105 if (ML != MLI->getLoopFor(PBB))
1106 continue;
1107
1108 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1110 if (!TII->analyzeBranch(*PBB, TBB, FBB, Cond, true)) {
1111 // Failing case: IBB is the target of a cbr, and we cannot reverse the
1112 // branch.
1114 if (!Cond.empty() && TBB == IBB) {
1115 if (TII->reverseBranchCondition(NewCond))
1116 continue;
1117 // This is the QBB case described above
1118 if (!FBB) {
1119 auto Next = ++PBB->getIterator();
1120 if (Next != MF.end())
1121 FBB = &*Next;
1122 }
1123 }
1124
1125 // Remove the unconditional branch at the end, if any.
1126 DebugLoc dl = PBB->findBranchDebugLoc();
1127 if (TBB && (Cond.empty() || FBB)) {
1128 TII->removeBranch(*PBB);
1129 if (!Cond.empty())
1130 // reinsert conditional branch only, for now
1131 TII->insertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
1132 NewCond, dl);
1133 }
1134
1135 MergePotentials.push_back(
1136 MergePotentialsElt(HashEndOfMBB(*PBB), PBB, dl));
1137 }
1138 }
1139
1140 // If this is a large problem, avoid visiting the same basic blocks multiple
1141 // times.
1142 if (MergePotentials.size() == TailMergeThreshold)
1143 for (MergePotentialsElt &Elt : MergePotentials)
1144 TriedMerging.insert(Elt.getBlock());
1145
1146 if (MergePotentials.size() >= 2)
1147 MadeChange |= TryTailMergeBlocks(IBB, PredBB, MinCommonTailLength);
1148
1149 // Reinsert an unconditional branch if needed. The 1 below can occur as a
1150 // result of removing blocks in TryTailMergeBlocks.
1151 PredBB = &*std::prev(I); // this may have been changed in TryTailMergeBlocks
1152 if (MergePotentials.size() == 1 &&
1153 MergePotentials.begin()->getBlock() != PredBB)
1154 FixTail(MergePotentials.begin()->getBlock(), IBB, TII,
1155 MergePotentials.begin()->getBranchDebugLoc());
1156 }
1157
1158 return MadeChange;
1159}
1160
1161void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) {
1162 SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size());
1163 BlockFrequency AccumulatedMBBFreq;
1164
1165 // Aggregate edge frequency of successor edge j:
1166 // edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)),
1167 // where bb is a basic block that is in SameTails.
1168 for (const auto &Src : SameTails) {
1169 const MachineBasicBlock *SrcMBB = Src.getBlock();
1170 BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(SrcMBB);
1171 AccumulatedMBBFreq += BlockFreq;
1172
1173 // It is not necessary to recompute edge weights if TailBB has less than two
1174 // successors.
1175 if (TailMBB.succ_size() <= 1)
1176 continue;
1177
1178 auto EdgeFreq = EdgeFreqLs.begin();
1179
1180 for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1181 SuccI != SuccE; ++SuccI, ++EdgeFreq)
1182 *EdgeFreq += BlockFreq * MBPI.getEdgeProbability(SrcMBB, *SuccI);
1183 }
1184
1185 MBBFreqInfo.setBlockFreq(&TailMBB, AccumulatedMBBFreq);
1186
1187 if (TailMBB.succ_size() <= 1)
1188 return;
1189
1190 auto SumEdgeFreq =
1191 std::accumulate(EdgeFreqLs.begin(), EdgeFreqLs.end(), BlockFrequency(0))
1192 .getFrequency();
1193 auto EdgeFreq = EdgeFreqLs.begin();
1194
1195 if (SumEdgeFreq > 0) {
1196 for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1197 SuccI != SuccE; ++SuccI, ++EdgeFreq) {
1199 EdgeFreq->getFrequency(), SumEdgeFreq);
1200 TailMBB.setSuccProbability(SuccI, Prob);
1201 }
1202 }
1203}
1204
1205//===----------------------------------------------------------------------===//
1206// Branch Optimization
1207//===----------------------------------------------------------------------===//
1208
1209bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
1210 bool MadeChange = false;
1211
1212 // Make sure blocks are numbered in order
1213 MF.RenumberBlocks();
1214 // Renumbering blocks alters EH scope membership, recalculate it.
1215 EHScopeMembership = getEHScopeMembership(MF);
1216
1217 for (MachineBasicBlock &MBB :
1219 MadeChange |= OptimizeBlock(&MBB);
1220
1221 // If it is dead, remove it.
1223 RemoveDeadBlock(&MBB);
1224 MadeChange = true;
1225 ++NumDeadBlocks;
1226 }
1227 }
1228
1229 return MadeChange;
1230}
1231
1232// Blocks should be considered empty if they contain only debug info;
1233// else the debug info would affect codegen.
1235 return MBB->getFirstNonDebugInstr(true) == MBB->end();
1236}
1237
1238// Blocks with only debug info and branches should be considered the same
1239// as blocks with only branches.
1242 assert(I != MBB->end() && "empty block!");
1243 return I->isBranch();
1244}
1245
1246/// IsBetterFallthrough - Return true if it would be clearly better to
1247/// fall-through to MBB1 than to fall through into MBB2. This has to return
1248/// a strict ordering, returning true for both (MBB1,MBB2) and (MBB2,MBB1) will
1249/// result in infinite loops.
1251 MachineBasicBlock *MBB2) {
1252 assert(MBB1 && MBB2 && "Unknown MachineBasicBlock");
1253
1254 // Right now, we use a simple heuristic. If MBB2 ends with a call, and
1255 // MBB1 doesn't, we prefer to fall through into MBB1. This allows us to
1256 // optimize branches that branch to either a return block or an assert block
1257 // into a fallthrough to the return.
1260 if (MBB1I == MBB1->end() || MBB2I == MBB2->end())
1261 return false;
1262
1263 // If there is a clear successor ordering we make sure that one block
1264 // will fall through to the next
1265 if (MBB1->isSuccessor(MBB2)) return true;
1266 if (MBB2->isSuccessor(MBB1)) return false;
1267
1268 return MBB2I->isCall() && !MBB1I->isCall();
1269}
1270
1271/// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch
1272/// instructions on the block.
1275 if (I != MBB.end() && I->isBranch())
1276 return I->getDebugLoc();
1277 return DebugLoc();
1278}
1279
1282 MachineBasicBlock &PredMBB) {
1283 auto InsertBefore = PredMBB.getFirstTerminator();
1284 for (MachineInstr &MI : MBB.instrs())
1285 if (MI.isDebugInstr()) {
1286 TII->duplicate(PredMBB, InsertBefore, MI);
1287 LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to pred: "
1288 << MI);
1289 }
1290}
1291
1294 MachineBasicBlock &SuccMBB) {
1295 auto InsertBefore = SuccMBB.SkipPHIsAndLabels(SuccMBB.begin());
1296 for (MachineInstr &MI : MBB.instrs())
1297 if (MI.isDebugInstr()) {
1298 TII->duplicate(SuccMBB, InsertBefore, MI);
1299 LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to succ: "
1300 << MI);
1301 }
1302}
1303
1304// Try to salvage DBG_VALUE instructions from an otherwise empty block. If such
1305// a basic block is removed we would lose the debug information unless we have
1306// copied the information to a predecessor/successor.
1307//
1308// TODO: This function only handles some simple cases. An alternative would be
1309// to run a heavier analysis, such as the LiveDebugValues pass, before we do
1310// branch folding.
1313 assert(IsEmptyBlock(&MBB) && "Expected an empty block (except debug info).");
1314 // If this MBB is the only predecessor of a successor it is legal to copy
1315 // DBG_VALUE instructions to the beginning of the successor.
1316 for (MachineBasicBlock *SuccBB : MBB.successors())
1317 if (SuccBB->pred_size() == 1)
1318 copyDebugInfoToSuccessor(TII, MBB, *SuccBB);
1319 // If this MBB is the only successor of a predecessor it is legal to copy the
1320 // DBG_VALUE instructions to the end of the predecessor (just before the
1321 // terminators, assuming that the terminator isn't affecting the DBG_VALUE).
1322 for (MachineBasicBlock *PredBB : MBB.predecessors())
1323 if (PredBB->succ_size() == 1)
1325}
1326
1327bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
1328 bool MadeChange = false;
1329 MachineFunction &MF = *MBB->getParent();
1330ReoptimizeBlock:
1331
1332 MachineFunction::iterator FallThrough = MBB->getIterator();
1333 ++FallThrough;
1334
1335 // Make sure MBB and FallThrough belong to the same EH scope.
1336 bool SameEHScope = true;
1337 if (!EHScopeMembership.empty() && FallThrough != MF.end()) {
1338 auto MBBEHScope = EHScopeMembership.find(MBB);
1339 assert(MBBEHScope != EHScopeMembership.end());
1340 auto FallThroughEHScope = EHScopeMembership.find(&*FallThrough);
1341 assert(FallThroughEHScope != EHScopeMembership.end());
1342 SameEHScope = MBBEHScope->second == FallThroughEHScope->second;
1343 }
1344
1345 // Analyze the branch in the current block. As a side-effect, this may cause
1346 // the block to become empty.
1347 MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
1349 bool CurUnAnalyzable =
1350 TII->analyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
1351
1352 // If this block is empty, make everyone use its fall-through, not the block
1353 // explicitly. Landing pads should not do this since the landing-pad table
1354 // points to this block. Blocks with their addresses taken shouldn't be
1355 // optimized away.
1356 if (IsEmptyBlock(MBB) && !MBB->isEHPad() && !MBB->hasAddressTaken() &&
1357 SameEHScope) {
1359 // Dead block? Leave for cleanup later.
1360 if (MBB->pred_empty()) return MadeChange;
1361
1362 if (FallThrough == MF.end()) {
1363 // TODO: Simplify preds to not branch here if possible!
1364 } else if (FallThrough->isEHPad()) {
1365 // Don't rewrite to a landing pad fallthough. That could lead to the case
1366 // where a BB jumps to more than one landing pad.
1367 // TODO: Is it ever worth rewriting predecessors which don't already
1368 // jump to a landing pad, and so can safely jump to the fallthrough?
1369 } else if (MBB->isSuccessor(&*FallThrough)) {
1370 // Rewrite all predecessors of the old block to go to the fallthrough
1371 // instead.
1372 while (!MBB->pred_empty()) {
1373 MachineBasicBlock *Pred = *(MBB->pred_end()-1);
1374 Pred->ReplaceUsesOfBlockWith(MBB, &*FallThrough);
1375 }
1376 // Add rest successors of MBB to successors of FallThrough. Those
1377 // successors are not directly reachable via MBB, so it should be
1378 // landing-pad.
1379 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
1380 if (*SI != &*FallThrough && !FallThrough->isSuccessor(*SI)) {
1381 assert((*SI)->isEHPad() && "Bad CFG");
1382 FallThrough->copySuccessor(MBB, SI);
1383 }
1384 // If MBB was the target of a jump table, update jump tables to go to the
1385 // fallthrough instead.
1386 if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1387 MJTI->ReplaceMBBInJumpTables(MBB, &*FallThrough);
1388 MadeChange = true;
1389 }
1390 return MadeChange;
1391 }
1392
1393 // Check to see if we can simplify the terminator of the block before this
1394 // one.
1395 MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
1396
1397 MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
1399 bool PriorUnAnalyzable =
1400 TII->analyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
1401 if (!PriorUnAnalyzable) {
1402 // If the previous branch is conditional and both conditions go to the same
1403 // destination, remove the branch, replacing it with an unconditional one or
1404 // a fall-through.
1405 if (PriorTBB && PriorTBB == PriorFBB) {
1406 DebugLoc dl = getBranchDebugLoc(PrevBB);
1407 TII->removeBranch(PrevBB);
1408 PriorCond.clear();
1409 if (PriorTBB != MBB)
1410 TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1411 MadeChange = true;
1412 ++NumBranchOpts;
1413 goto ReoptimizeBlock;
1414 }
1415
1416 // If the previous block unconditionally falls through to this block and
1417 // this block has no other predecessors, move the contents of this block
1418 // into the prior block. This doesn't usually happen when SimplifyCFG
1419 // has been used, but it can happen if tail merging splits a fall-through
1420 // predecessor of a block.
1421 // This has to check PrevBB->succ_size() because EH edges are ignored by
1422 // analyzeBranch.
1423 if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 &&
1424 PrevBB.succ_size() == 1 && PrevBB.isSuccessor(MBB) &&
1425 !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1426 LLVM_DEBUG(dbgs() << "\nMerging into block: " << PrevBB
1427 << "From MBB: " << *MBB);
1428 // Remove redundant DBG_VALUEs first.
1429 if (!PrevBB.empty()) {
1430 MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
1431 --PrevBBIter;
1433 // Check if DBG_VALUE at the end of PrevBB is identical to the
1434 // DBG_VALUE at the beginning of MBB.
1435 while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end()
1436 && PrevBBIter->isDebugInstr() && MBBIter->isDebugInstr()) {
1437 if (!MBBIter->isIdenticalTo(*PrevBBIter))
1438 break;
1439 MachineInstr &DuplicateDbg = *MBBIter;
1440 ++MBBIter; -- PrevBBIter;
1441 DuplicateDbg.eraseFromParent();
1442 }
1443 }
1444 PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end());
1445 PrevBB.removeSuccessor(PrevBB.succ_begin());
1446 assert(PrevBB.succ_empty());
1447 PrevBB.transferSuccessors(MBB);
1448 MadeChange = true;
1449 return MadeChange;
1450 }
1451
1452 // If the previous branch *only* branches to *this* block (conditional or
1453 // not) remove the branch.
1454 if (PriorTBB == MBB && !PriorFBB) {
1455 TII->removeBranch(PrevBB);
1456 MadeChange = true;
1457 ++NumBranchOpts;
1458 goto ReoptimizeBlock;
1459 }
1460
1461 // If the prior block branches somewhere else on the condition and here if
1462 // the condition is false, remove the uncond second branch.
1463 if (PriorFBB == MBB) {
1464 DebugLoc dl = getBranchDebugLoc(PrevBB);
1465 TII->removeBranch(PrevBB);
1466 TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1467 MadeChange = true;
1468 ++NumBranchOpts;
1469 goto ReoptimizeBlock;
1470 }
1471
1472 // If the prior block branches here on true and somewhere else on false, and
1473 // if the branch condition is reversible, reverse the branch to create a
1474 // fall-through.
1475 if (PriorTBB == MBB) {
1476 SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1477 if (!TII->reverseBranchCondition(NewPriorCond)) {
1478 DebugLoc dl = getBranchDebugLoc(PrevBB);
1479 TII->removeBranch(PrevBB);
1480 TII->insertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
1481 MadeChange = true;
1482 ++NumBranchOpts;
1483 goto ReoptimizeBlock;
1484 }
1485 }
1486
1487 // If this block has no successors (e.g. it is a return block or ends with
1488 // a call to a no-return function like abort or __cxa_throw) and if the pred
1489 // falls through into this block, and if it would otherwise fall through
1490 // into the block after this, move this block to the end of the function.
1491 //
1492 // We consider it more likely that execution will stay in the function (e.g.
1493 // due to loops) than it is to exit it. This asserts in loops etc, moving
1494 // the assert condition out of the loop body.
1495 if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
1496 MachineFunction::iterator(PriorTBB) == FallThrough &&
1497 !MBB->canFallThrough()) {
1498 bool DoTransform = true;
1499
1500 // We have to be careful that the succs of PredBB aren't both no-successor
1501 // blocks. If neither have successors and if PredBB is the second from
1502 // last block in the function, we'd just keep swapping the two blocks for
1503 // last. Only do the swap if one is clearly better to fall through than
1504 // the other.
1505 if (FallThrough == --MF.end() &&
1506 !IsBetterFallthrough(PriorTBB, MBB))
1507 DoTransform = false;
1508
1509 if (DoTransform) {
1510 // Reverse the branch so we will fall through on the previous true cond.
1511 SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1512 if (!TII->reverseBranchCondition(NewPriorCond)) {
1513 LLVM_DEBUG(dbgs() << "\nMoving MBB: " << *MBB
1514 << "To make fallthrough to: " << *PriorTBB << "\n");
1515
1516 DebugLoc dl = getBranchDebugLoc(PrevBB);
1517 TII->removeBranch(PrevBB);
1518 TII->insertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
1519
1520 // Move this block to the end of the function.
1521 MBB->moveAfter(&MF.back());
1522 MadeChange = true;
1523 ++NumBranchOpts;
1524 return MadeChange;
1525 }
1526 }
1527 }
1528 }
1529
1530 if (!IsEmptyBlock(MBB)) {
1532 if (TII->isUnconditionalTailCall(TailCall)) {
1534 for (auto &Pred : MBB->predecessors()) {
1535 MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1537 bool PredAnalyzable =
1538 !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
1539
1540 // Only eliminate if MBB == TBB (Taken Basic Block)
1541 if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB &&
1542 PredTBB != PredFBB) {
1543 // The predecessor has a conditional branch to this block which
1544 // consists of only a tail call. Try to fold the tail call into the
1545 // conditional branch.
1546 if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
1547 // TODO: It would be nice if analyzeBranch() could provide a pointer
1548 // to the branch instruction so replaceBranchWithTailCall() doesn't
1549 // have to search for it.
1550 TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
1551 PredsChanged.push_back(Pred);
1552 }
1553 }
1554 // If the predecessor is falling through to this block, we could reverse
1555 // the branch condition and fold the tail call into that. However, after
1556 // that we might have to re-arrange the CFG to fall through to the other
1557 // block and there is a high risk of regressing code size rather than
1558 // improving it.
1559 }
1560 if (!PredsChanged.empty()) {
1561 NumTailCalls += PredsChanged.size();
1562 for (auto &Pred : PredsChanged)
1563 Pred->removeSuccessor(MBB);
1564
1565 return true;
1566 }
1567 }
1568 }
1569
1570 if (!CurUnAnalyzable) {
1571 // If this is a two-way branch, and the FBB branches to this block, reverse
1572 // the condition so the single-basic-block loop is faster. Instead of:
1573 // Loop: xxx; jcc Out; jmp Loop
1574 // we want:
1575 // Loop: xxx; jncc Loop; jmp Out
1576 if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
1577 SmallVector<MachineOperand, 4> NewCond(CurCond);
1578 if (!TII->reverseBranchCondition(NewCond)) {
1580 TII->removeBranch(*MBB);
1581 TII->insertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
1582 MadeChange = true;
1583 ++NumBranchOpts;
1584 goto ReoptimizeBlock;
1585 }
1586 }
1587
1588 // If this branch is the only thing in its block, see if we can forward
1589 // other blocks across it.
1590 if (CurTBB && CurCond.empty() && !CurFBB &&
1591 IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
1592 !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1594 // This block may contain just an unconditional branch. Because there can
1595 // be 'non-branch terminators' in the block, try removing the branch and
1596 // then seeing if the block is empty.
1597 TII->removeBranch(*MBB);
1598 // If the only things remaining in the block are debug info, remove these
1599 // as well, so this will behave the same as an empty block in non-debug
1600 // mode.
1601 if (IsEmptyBlock(MBB)) {
1602 // Make the block empty, losing the debug info (we could probably
1603 // improve this in some cases.)
1604 MBB->erase(MBB->begin(), MBB->end());
1605 }
1606 // If this block is just an unconditional branch to CurTBB, we can
1607 // usually completely eliminate the block. The only case we cannot
1608 // completely eliminate the block is when the block before this one
1609 // falls through into MBB and we can't understand the prior block's branch
1610 // condition.
1611 if (MBB->empty()) {
1612 bool PredHasNoFallThrough = !PrevBB.canFallThrough();
1613 if (PredHasNoFallThrough || !PriorUnAnalyzable ||
1614 !PrevBB.isSuccessor(MBB)) {
1615 // If the prior block falls through into us, turn it into an
1616 // explicit branch to us to make updates simpler.
1617 if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
1618 PriorTBB != MBB && PriorFBB != MBB) {
1619 if (!PriorTBB) {
1620 assert(PriorCond.empty() && !PriorFBB &&
1621 "Bad branch analysis");
1622 PriorTBB = MBB;
1623 } else {
1624 assert(!PriorFBB && "Machine CFG out of date!");
1625 PriorFBB = MBB;
1626 }
1627 DebugLoc pdl = getBranchDebugLoc(PrevBB);
1628 TII->removeBranch(PrevBB);
1629 TII->insertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl);
1630 }
1631
1632 // Iterate through all the predecessors, revectoring each in turn.
1633 size_t PI = 0;
1634 bool DidChange = false;
1635 bool HasBranchToSelf = false;
1636 while(PI != MBB->pred_size()) {
1637 MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI);
1638 if (PMBB == MBB) {
1639 // If this block has an uncond branch to itself, leave it.
1640 ++PI;
1641 HasBranchToSelf = true;
1642 } else {
1643 DidChange = true;
1644 PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB);
1645 // Add the remaining successors of MBB to the successors of CurTBB. Such a
1646 // successor is not reachable through the forwarded unconditional branch,
1647 // so it must be a landing pad.
1648 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE;
1649 ++SI)
1650 if (*SI != CurTBB && !CurTBB->isSuccessor(*SI)) {
1651 assert((*SI)->isEHPad() && "Bad CFG");
1652 CurTBB->copySuccessor(MBB, SI);
1653 }
1654 // If this change resulted in PMBB ending in a conditional
1655 // branch where both conditions go to the same destination,
1656 // change this to an unconditional branch.
1657 MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
1658 SmallVector<MachineOperand, 4> NewCurCond;
1659 bool NewCurUnAnalyzable = TII->analyzeBranch(
1660 *PMBB, NewCurTBB, NewCurFBB, NewCurCond, true);
1661 if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
1662 DebugLoc pdl = getBranchDebugLoc(*PMBB);
1663 TII->removeBranch(*PMBB);
1664 NewCurCond.clear();
1665 TII->insertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
1666 MadeChange = true;
1667 ++NumBranchOpts;
1668 }
1669 }
1670 }
1671
1672 // Change any jumptables to go to the new MBB.
1673 if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1674 MJTI->ReplaceMBBInJumpTables(MBB, CurTBB);
1675 if (DidChange) {
1676 ++NumBranchOpts;
1677 MadeChange = true;
1678 if (!HasBranchToSelf) return MadeChange;
1679 }
1680 }
1681 }
1682
1683 // Add the branch back if the block is more than just an uncond branch.
1684 TII->insertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
1685 }
1686 }
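// A minimal sketch of the forwarding above, with hypothetical block names:
//
//   bb.1: ... ; JMP %bb.2                      bb.1: ... ; JMP %bb.3
//   bb.2: JMP %bb.3                    ==>     bb.2: (no predecessors left)
//
// Every predecessor of the branch-only block is revectored to CurTBB, jump
// tables referring to the block are updated, and the emptied block is
// normally removed as dead by the caller afterwards.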
1687
1688 // If the prior block doesn't fall through into this block, and if this
1689 // block doesn't fall through into some other block, see if we can find a
1690 // place to move this block where a fall-through will happen.
1691 if (!PrevBB.canFallThrough()) {
1692 // Now we know that there was no fall-through into this block, check to
1693 // see if it has a fall-through into its successor.
1694 bool CurFallsThru = MBB->canFallThrough();
1695
1696 if (!MBB->isEHPad()) {
1697 // Check all the predecessors of this block. If one of them has no fall
1698 // throughs, and analyzeBranch thinks it _could_ fallthrough to this
1699 // block, move this block right after it.
1700 for (MachineBasicBlock *PredBB : MBB->predecessors()) {
1701 // Analyze the branch at the end of the pred.
1702 MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1703 SmallVector<MachineOperand, 4> PredCond;
1704 if (PredBB != MBB && !PredBB->canFallThrough() &&
1705 !TII->analyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true) &&
1706 (PredTBB == MBB || PredFBB == MBB) &&
1707 (!CurFallsThru || !CurTBB || !CurFBB) &&
1708 (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) {
1709 // If the current block doesn't fall through, just move it.
1710 // If the current block can fall through and does not end with a
1711 // conditional branch, we need to append an unconditional jump to
1712 // the (current) next block. To avoid a possible compile-time
1713 // infinite loop, move blocks only backward in this case.
1714 // Also, if there are already 2 branches here, we cannot add a third;
1715 // this means we have the case
1716 // Bcc next
1717 // B elsewhere
1718 // next:
1719 if (CurFallsThru) {
1720 MachineBasicBlock *NextBB = &*std::next(MBB->getIterator());
1721 CurCond.clear();
1722 TII->insertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
1723 }
1724 MBB->moveAfter(PredBB);
1725 MadeChange = true;
1726 goto ReoptimizeBlock;
1727 }
1728 }
1729 }
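// A minimal sketch of the move above, with hypothetical block names: if bb.4
// ends in "Jcc %bb.7 ; JMP %bb.9" and cannot fall through, moving bb.7 right
// after bb.4 makes bb.7 its layout successor, so the branch to bb.7 can be
// turned into a fall-through when the block is revisited via ReoptimizeBlock.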
1730
1731 if (!CurFallsThru) {
1732 // Check analyzable branch-successors to see if we can move this block
1733 // before one.
1734 if (!CurUnAnalyzable) {
1735 for (MachineBasicBlock *SuccBB : {CurFBB, CurTBB}) {
1736 if (!SuccBB)
1737 continue;
1738 // Analyze the branch at the end of the block before the succ.
1739 MachineFunction::iterator SuccPrev = --SuccBB->getIterator();
1740
1741 // If this block doesn't already fall-through to that successor, and
1742 // if the succ doesn't already have a block that can fall through into
1743 // it, we can arrange for the fallthrough to happen.
1744 if (SuccBB != MBB && &*SuccPrev != MBB &&
1745 !SuccPrev->canFallThrough()) {
1746 MBB->moveBefore(SuccBB);
1747 MadeChange = true;
1748 goto ReoptimizeBlock;
1749 }
1750 }
1751 }
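// A minimal sketch of the move above, with hypothetical block names: if this
// block ends in "Jcc %bb.7 ; JMP %bb.3" and nothing currently falls through
// into %bb.3, placing the block immediately before %bb.3 allows the
// unconditional branch to %bb.3 to be dropped on a later iteration.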
1752
1753 // Okay, there is no really great place to put this block. If, however,
1754 // the block before this one would be a fall-through if this block were
1755 // removed, move this block to the end of the function. There is no real
1756 // advantage in "falling through" to an EH block, so we don't want to
1757 // perform this transformation for that case.
1758 //
1759 // Also, Windows EH introduced the possibility of an arbitrary number of
1760 // successors to a given block. The analyzeBranch call does not consider
1761 // exception handling and so we can get in a state where a block
1762 // containing a call is followed by multiple EH blocks that would be
1763 // rotated infinitely at the end of the function if the transformation
1764 // below were performed for EH "FallThrough" blocks. Therefore, even if
1765 // that appears not to be happening anymore, we should assume that it is
1766 // possible and not remove the "!FallThrough()->isEHPad" condition below.
1767 MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
1768 SmallVector<MachineOperand, 4> PrevCond;
1769 if (FallThrough != MF.end() &&
1770 !FallThrough->isEHPad() &&
1771 !TII->analyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
1772 PrevBB.isSuccessor(&*FallThrough)) {
1773 MBB->moveAfter(&MF.back());
1774 MadeChange = true;
1775 return MadeChange;
1776 }
1777 }
1778 }
1779
1780 return MadeChange;
1781}
1782
1783//===----------------------------------------------------------------------===//
1784// Hoist Common Code
1785//===----------------------------------------------------------------------===//
1786
1787bool BranchFolder::HoistCommonCode(MachineFunction &MF) {
1788 bool MadeChange = false;
1790 MadeChange |= HoistCommonCodeInSuccs(&MBB);
1791
1792 return MadeChange;
1793}
1794
1795/// findFalseBlock - BB has a fallthrough. Find its 'false' successor given
1796/// its 'true' successor.
1797 static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
1798 MachineBasicBlock *TrueBB) {
1799 for (MachineBasicBlock *SuccBB : BB->successors())
1800 if (SuccBB != TrueBB)
1801 return SuccBB;
1802 return nullptr;
1803}
1804
1805template <class Container>
1806 static void addRegAndItsAliases(Register Reg, const TargetRegisterInfo *TRI,
1807 Container &Set) {
1808 if (Reg.isPhysical()) {
1809 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1810 Set.insert(*AI);
1811 } else {
1812 Set.insert(Reg);
1813 }
1814}
1815
1816/// findHoistingInsertPosAndDeps - Find the location to move common instructions
1817 /// in successors to. The location is usually just before the terminator;
1818 /// however, if the terminator is a conditional branch and the instruction
1819 /// just before it sets the condition it tests, that earlier instruction is
1820 /// the preferred location. This function also gathers uses and defs of the
1821/// instructions from the insertion point to the end of the block. The data is
1822/// used by HoistCommonCodeInSuccs to ensure safety.
1823 static
1824 MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
1825 const TargetInstrInfo *TII,
1826 const TargetRegisterInfo *TRI,
1827 SmallSet<Register, 4> &Uses,
1828 SmallSet<Register, 4> &Defs) {
1829 MachineBasicBlock::iterator Loc = MBB->getFirstTerminator();
1830 if (!TII->isUnpredicatedTerminator(*Loc))
1831 return MBB->end();
1832
1833 for (const MachineOperand &MO : Loc->operands()) {
1834 if (!MO.isReg())
1835 continue;
1836 Register Reg = MO.getReg();
1837 if (!Reg)
1838 continue;
1839 if (MO.isUse()) {
1840 addRegAndItsAliases(Reg, TRI, Uses);
1841 } else {
1842 if (!MO.isDead())
1843 // Don't try to hoist code in the rare case the terminator defines a
1844 // register that is later used.
1845 return MBB->end();
1846
1847 // If the terminator defines a register, make sure we don't hoist
1848 // the instruction whose def might be clobbered by the terminator.
1849 addRegAndItsAliases(Reg, TRI, Defs);
1850 }
1851 }
1852
1853 if (Uses.empty())
1854 return Loc;
1855 // If the terminator is the only instruction in the block and Uses is not
1856 // empty (or we would have returned above), we can still safely hoist
1857 // instructions just before the terminator as long as the Defs/Uses are not
1858 // violated (which is checked in HoistCommonCodeInSuccs).
1859 if (Loc == MBB->begin())
1860 return Loc;
1861
1862 // The terminator is probably a conditional branch; try not to separate the
1863 // branch from the instruction that sets the condition it tests.
1864 MachineBasicBlock::iterator PI = prev_nodbg(Loc, MBB->begin());
1865
1866 bool IsDef = false;
1867 for (const MachineOperand &MO : PI->operands()) {
1868 // If PI has a regmask operand, it is probably a call. Separate away.
1869 if (MO.isRegMask())
1870 return Loc;
1871 if (!MO.isReg() || MO.isUse())
1872 continue;
1873 Register Reg = MO.getReg();
1874 if (!Reg)
1875 continue;
1876 if (Uses.count(Reg)) {
1877 IsDef = true;
1878 break;
1879 }
1880 }
1881 if (!IsDef)
1882 // The condition setting instruction is not just before the conditional
1883 // branch.
1884 return Loc;
1885
1886 // Be conservative: don't insert instructions above something that may have
1887 // side effects. And since it's potentially bad to separate the flag-setting
1888 // instruction from the conditional branch, just abort the optimization
1889 // completely.
1890 // Also avoid moving code above a predicated instruction, since it's hard to
1891 // reason about register liveness with predicated instructions.
1892 bool DontMoveAcrossStore = true;
1893 if (!PI->isSafeToMove(DontMoveAcrossStore) || TII->isPredicated(*PI))
1894 return MBB->end();
1895
1896 // Find out what registers are live. Note this routine is ignoring other live
1897 // registers which are only used by instructions in successor blocks.
1898 for (const MachineOperand &MO : PI->operands()) {
1899 if (!MO.isReg())
1900 continue;
1901 Register Reg = MO.getReg();
1902 if (!Reg)
1903 continue;
1904 if (MO.isUse()) {
1905 addRegAndItsAliases(Reg, TRI, Uses);
1906 } else {
1907 if (Uses.erase(Reg)) {
1908 if (Reg.isPhysical()) {
1909 for (MCPhysReg SubReg : TRI->subregs(Reg))
1910 Uses.erase(SubReg); // Use sub-registers to be conservative
1911 }
1912 }
1913 addRegAndItsAliases(Reg, TRI, Defs);
1914 }
1915 }
1916
1917 return PI;
1918}
1919
1920bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
1921 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1922 SmallVector<MachineOperand, 4> Cond;
1923 if (TII->analyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
1924 return false;
1925
1926 if (!FBB) FBB = findFalseBlock(MBB, TBB);
1927 if (!FBB)
1928 // Malformed bcc? True and false blocks are the same?
1929 return false;
1930
1931 // Restrict the optimization to cases where MBB is the only predecessor;
1932 // in that case it is an obvious win.
1933 if (TBB->pred_size() > 1 || FBB->pred_size() > 1)
1934 return false;
1935
1936 // Find a suitable position to hoist the common instructions to. Also figure
1937 // out which registers are used or defined by instructions from the insertion
1938 // point to the end of the block.
1939 SmallSet<Register, 4> Uses, Defs;
1940 MachineBasicBlock::iterator Loc =
1941 findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs);
1942 if (Loc == MBB->end())
1943 return false;
1944
1945 bool HasDups = false;
1946 SmallSet<Register, 4> ActiveDefsSet, AllDefsSet;
1947 MachineBasicBlock::iterator TIB = TBB->begin();
1948 MachineBasicBlock::iterator FIB = FBB->begin();
1949 MachineBasicBlock::iterator TIE = TBB->end();
1950 MachineBasicBlock::iterator FIE = FBB->end();
1951 while (TIB != TIE && FIB != FIE) {
1952 // Skip dbg_value instructions. These do not count.
1953 TIB = skipDebugInstructionsForward(TIB, TIE, false);
1954 FIB = skipDebugInstructionsForward(FIB, FIE, false);
1955 if (TIB == TIE || FIB == FIE)
1956 break;
1957
1958 if (!TIB->isIdenticalTo(*FIB, MachineInstr::CheckKillDead))
1959 break;
1960
1961 if (TII->isPredicated(*TIB))
1962 // Hard to reason about register liveness with predicated instructions.
1963 break;
1964
1965 bool IsSafe = true;
1966 for (MachineOperand &MO : TIB->operands()) {
1967 // Don't attempt to hoist instructions with register masks.
1968 if (MO.isRegMask()) {
1969 IsSafe = false;
1970 break;
1971 }
1972 if (!MO.isReg())
1973 continue;
1974 Register Reg = MO.getReg();
1975 if (!Reg)
1976 continue;
1977 if (MO.isDef()) {
1978 if (Uses.count(Reg)) {
1979 // Avoid clobbering a register that's used by the instruction at
1980 // the point of insertion.
1981 IsSafe = false;
1982 break;
1983 }
1984
1985 if (Defs.count(Reg) && !MO.isDead()) {
1986 // Don't hoist the instruction if the def would be clobbered by the
1987 // instruction at the point of insertion. FIXME: This is overly
1988 // conservative. It should be possible to hoist the instructions
1989 // in BB2 in the following example:
1990 // BB1:
1991 // r1, eflag = op1 r2, r3
1992 // brcc eflag
1993 //
1994 // BB2:
1995 // r1 = op2, ...
1996 // = op3, killed r1
1997 IsSafe = false;
1998 break;
1999 }
2000 } else if (!ActiveDefsSet.count(Reg)) {
2001 if (Defs.count(Reg)) {
2002 // Use is defined by the instruction at the point of insertion.
2003 IsSafe = false;
2004 break;
2005 }
2006
2007 if (MO.isKill() && Uses.count(Reg))
2008 // Kills a register that's read by the instruction at the point of
2009 // insertion. Remove the kill marker.
2010 MO.setIsKill(false);
2011 }
2012 }
2013 if (!IsSafe)
2014 break;
2015
2016 bool DontMoveAcrossStore = true;
2017 if (!TIB->isSafeToMove(DontMoveAcrossStore))
2018 break;
2019
2020 // Remove kills from ActiveDefsSet; these registers had short live ranges.
2021 for (const MachineOperand &MO : TIB->all_uses()) {
2022 if (!MO.isKill())
2023 continue;
2024 Register Reg = MO.getReg();
2025 if (!Reg)
2026 continue;
2027 if (!AllDefsSet.count(Reg)) {
2028 continue;
2029 }
2030 if (Reg.isPhysical()) {
2031 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
2032 ActiveDefsSet.erase(*AI);
2033 } else {
2034 ActiveDefsSet.erase(Reg);
2035 }
2036 }
2037
2038 // Track local defs so we can update liveins.
2039 for (const MachineOperand &MO : TIB->all_defs()) {
2040 if (MO.isDead())
2041 continue;
2042 Register Reg = MO.getReg();
2043 if (!Reg || Reg.isVirtual())
2044 continue;
2045 addRegAndItsAliases(Reg, TRI, ActiveDefsSet);
2046 addRegAndItsAliases(Reg, TRI, AllDefsSet);
2047 }
2048
2049 HasDups = true;
2050 ++TIB;
2051 ++FIB;
2052 }
2053
2054 if (!HasDups)
2055 return false;
2056
2057 MBB->splice(Loc, TBB, TBB->begin(), TIB);
2058 FBB->erase(FBB->begin(), FIB);
2059
2060 if (UpdateLiveIns)
2061 fullyRecomputeLiveIns({TBB, FBB});
2062
2063 ++NumHoist;
2064 return true;
2065}