BranchFolding.cpp
1//===- BranchFolding.cpp - Fold machine code branch instructions ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass forwards branches to unconditional branches to make them branch
10// directly to the target block. This pass often results in dead MBB's, which
11// it then removes.
12//
13// Note that this pass must be run after register allocation; it cannot handle
14// SSA form. It also must handle virtual registers for targets that emit virtual
15// ISA (e.g. NVPTX).
16//
17//===----------------------------------------------------------------------===//
18
19#include "BranchFolding.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/Statistic.h"
45#include "llvm/IR/DebugLoc.h"
46#include "llvm/IR/Function.h"
48#include "llvm/MC/LaneBitmask.h"
50#include "llvm/Pass.h"
54#include "llvm/Support/Debug.h"
58#include <cassert>
59#include <cstddef>
60#include <iterator>
61#include <numeric>
62
63using namespace llvm;
64
65#define DEBUG_TYPE "branch-folder"
66
67STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
68STATISTIC(NumBranchOpts, "Number of branches optimized");
69STATISTIC(NumTailMerge , "Number of block tails merged");
70STATISTIC(NumHoist , "Number of times common instructions are hoisted");
71STATISTIC(NumTailCalls, "Number of tail calls optimized");
72
73static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
74 cl::init(cl::BOU_UNSET), cl::Hidden);
75
76// Throttle for huge numbers of predecessors (compile speed problems)
77static cl::opt<unsigned>
78TailMergeThreshold("tail-merge-threshold",
79 cl::desc("Max number of predecessors to consider tail merging"),
80 cl::init(150), cl::Hidden);
81
82// Heuristic for tail merging (and, inversely, tail duplication).
83static cl::opt<unsigned>
84TailMergeSize("tail-merge-size",
85 cl::desc("Min number of instructions to consider tail merging"),
86 cl::init(3), cl::Hidden);
87
88namespace {
89
90 /// BranchFolderPass - Wrap branch folder in a machine function pass.
91 class BranchFolderPass : public MachineFunctionPass {
92 public:
93 static char ID;
94
95 explicit BranchFolderPass(): MachineFunctionPass(ID) {}
96
97 bool runOnMachineFunction(MachineFunction &MF) override;
98
99 void getAnalysisUsage(AnalysisUsage &AU) const override {
100 AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
101 AU.addRequired<MachineBranchProbabilityInfoWrapperPass>();
102 AU.addRequired<ProfileSummaryInfoWrapperPass>();
103 AU.addRequired<TargetPassConfig>();
104 MachineFunctionPass::getAnalysisUsage(AU);
105 }
106
107 MachineFunctionProperties getRequiredProperties() const override {
108 return MachineFunctionProperties().set(
109 MachineFunctionProperties::Property::NoPHIs);
110 }
111 };
112
113} // end anonymous namespace
114
115char BranchFolderPass::ID = 0;
116
117char &llvm::BranchFolderPassID = BranchFolderPass::ID;
118
119INITIALIZE_PASS(BranchFolderPass, DEBUG_TYPE,
120 "Control Flow Optimizer", false, false)
121
122bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
123 if (skipFunction(MF.getFunction()))
124 return false;
125
126 TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
127 // TailMerge can create jumps into 'if' branches that make the CFG
128 // irreducible for HW that requires a structured CFG.
129 bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
130 PassConfig->getEnableTailMerge();
131 MBFIWrapper MBBFreqInfo(
132 getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI());
133 BranchFolder Folder(
134 EnableTailMerge, /*CommonHoist=*/true, MBBFreqInfo,
135 getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI(),
136 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI());
137 return Folder.OptimizeFunction(MF, MF.getSubtarget().getInstrInfo(),
138 MF.getSubtarget().getRegisterInfo());
139}
140
141BranchFolder::BranchFolder(bool DefaultEnableTailMerge, bool CommonHoist,
142 MBFIWrapper &FreqInfo,
143 const MachineBranchProbabilityInfo &ProbInfo,
144 ProfileSummaryInfo *PSI, unsigned MinTailLength)
145 : EnableHoistCommonCode(CommonHoist), MinCommonTailLength(MinTailLength),
146 MBBFreqInfo(FreqInfo), MBPI(ProbInfo), PSI(PSI) {
147 switch (FlagEnableTailMerge) {
148 case cl::BOU_UNSET:
149 EnableTailMerge = DefaultEnableTailMerge;
150 break;
151 case cl::BOU_TRUE: EnableTailMerge = true; break;
152 case cl::BOU_FALSE: EnableTailMerge = false; break;
153 }
154}
155
156void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
157 assert(MBB->pred_empty() && "MBB must be dead!");
158 LLVM_DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
159
160 MachineFunction *MF = MBB->getParent();
161 // drop all successors.
162 while (!MBB->succ_empty())
163 MBB->removeSuccessor(MBB->succ_end()-1);
164
165 // Avoid matching if this pointer gets reused.
166 TriedMerging.erase(MBB);
167
168 // Update call site info.
169 for (const MachineInstr &MI : *MBB)
170 if (MI.shouldUpdateCallSiteInfo())
171 MF->eraseCallSiteInfo(&MI);
172
173 // Remove the block.
174 MF->erase(MBB);
175 EHScopeMembership.erase(MBB);
176 if (MLI)
177 MLI->removeBlock(MBB);
178}
179
180bool BranchFolder::OptimizeFunction(MachineFunction &MF,
181 const TargetInstrInfo *tii,
182 const TargetRegisterInfo *tri,
183 MachineLoopInfo *mli, bool AfterPlacement) {
184 if (!tii) return false;
185
186 TriedMerging.clear();
187
188 MachineRegisterInfo &MRI = MF.getRegInfo();
189 AfterBlockPlacement = AfterPlacement;
190 TII = tii;
191 TRI = tri;
192 MLI = mli;
193 this->MRI = &MRI;
194
195 if (MinCommonTailLength == 0) {
196 MinCommonTailLength = TailMergeSize.getNumOccurrences() > 0
197 ? TailMergeSize
198 : TII->getTailMergeSize(MF);
199 }
200
201 UpdateLiveIns = MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF);
202 if (!UpdateLiveIns)
203 MRI.invalidateLiveness();
204
205 bool MadeChange = false;
206
207 // Recalculate EH scope membership.
208 EHScopeMembership = getEHScopeMembership(MF);
209
210 bool MadeChangeThisIteration = true;
211 while (MadeChangeThisIteration) {
212 MadeChangeThisIteration = TailMergeBlocks(MF);
213 // No need to clean up if tail merging does not change anything after the
214 // block placement.
215 if (!AfterBlockPlacement || MadeChangeThisIteration)
216 MadeChangeThisIteration |= OptimizeBranches(MF);
217 if (EnableHoistCommonCode)
218 MadeChangeThisIteration |= HoistCommonCode(MF);
219 MadeChange |= MadeChangeThisIteration;
220 }
221
222 // See if any jump tables have become dead as the code generator
223 // did its thing.
224 MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
225 if (!JTI)
226 return MadeChange;
227
228 // Walk the function to find jump tables that are live.
229 BitVector JTIsLive(JTI->getJumpTables().size());
230 for (const MachineBasicBlock &BB : MF) {
231 for (const MachineInstr &I : BB)
232 for (const MachineOperand &Op : I.operands()) {
233 if (!Op.isJTI()) continue;
234
235 // Remember that this JT is live.
236 JTIsLive.set(Op.getIndex());
237 }
238 }
239
240 // Finally, remove dead jump tables. This happens when the
241 // indirect jump was unreachable (and thus deleted).
242 for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
243 if (!JTIsLive.test(i)) {
244 JTI->RemoveJumpTable(i);
245 MadeChange = true;
246 }
247
248 return MadeChange;
249}
250
251//===----------------------------------------------------------------------===//
252// Tail Merging of Blocks
253//===----------------------------------------------------------------------===//
254
255/// HashMachineInstr - Compute a hash value for MI and its operands.
256static unsigned HashMachineInstr(const MachineInstr &MI) {
257 unsigned Hash = MI.getOpcode();
258 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
259 const MachineOperand &Op = MI.getOperand(i);
260
261 // Merge in bits from the operand if easy. We can't use MachineOperand's
262 // hash_code here because it's not deterministic and we sort by hash value
263 // later.
264 unsigned OperandHash = 0;
265 switch (Op.getType()) {
266 case MachineOperand::MO_Register:
267 OperandHash = Op.getReg();
268 break;
269 case MachineOperand::MO_Immediate:
270 OperandHash = Op.getImm();
271 break;
272 case MachineOperand::MO_MachineBasicBlock:
273 OperandHash = Op.getMBB()->getNumber();
274 break;
275 case MachineOperand::MO_FrameIndex:
276 case MachineOperand::MO_ConstantPoolIndex:
277 case MachineOperand::MO_JumpTableIndex:
278 OperandHash = Op.getIndex();
279 break;
280 case MachineOperand::MO_GlobalAddress:
281 case MachineOperand::MO_ExternalSymbol:
282 // Global address / external symbol are too hard, don't bother, but do
283 // pull in the offset.
283 // pull in the offset.
284 OperandHash = Op.getOffset();
285 break;
286 default:
287 break;
288 }
289
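// Fold the operand's hash together with its operand kind, staggered by the
// operand index (mod 32) so that the same value appearing in a different
// operand position contributes differently to the final hash.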
290 Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
291 }
292 return Hash;
293}
294
295/// HashEndOfMBB - Hash the last instruction in the MBB.
296static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) {
297 MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
298 if (I == MBB.end())
299 return 0;
300
301 return HashMachineInstr(*I);
302}
303
304/// Whether MI should be counted as an instruction when calculating common tail.
305static bool countsAsInstruction(const MachineInstr &MI) {
306 return !(MI.isDebugInstr() || MI.isCFIInstruction());
307}
308
309/// Iterate backwards from the given iterator \p I, towards the beginning of the
310/// block. If a MI satisfying 'countsAsInstruction' is found, return an iterator
311/// pointing to that MI. If no such MI is found, return the end iterator.
312static MachineBasicBlock::iterator
313skipBackwardPastNonInstructions(MachineBasicBlock::iterator I,
314 MachineBasicBlock *MBB) {
315 while (I != MBB->begin()) {
316 --I;
317 if (countsAsInstruction(*I))
318 return I;
319 }
320 return MBB->end();
321}
322
323/// Given two machine basic blocks, return the number of instructions they
324/// actually have in common together at their end. If a common tail is found (at
325/// least by one instruction), then iterators for the first shared instruction
326/// in each block are returned as well.
327///
328/// Non-instructions according to countsAsInstruction are ignored.
329static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
330 MachineBasicBlock *MBB2,
331 MachineBasicBlock::iterator &I1,
332 MachineBasicBlock::iterator &I2) {
333 MachineBasicBlock::iterator MBBI1 = MBB1->end();
334 MachineBasicBlock::iterator MBBI2 = MBB2->end();
335
336 unsigned TailLen = 0;
337 while (true) {
338 MBBI1 = skipBackwardPastNonInstructions(MBBI1, MBB1);
339 MBBI2 = skipBackwardPastNonInstructions(MBBI2, MBB2);
340 if (MBBI1 == MBB1->end() || MBBI2 == MBB2->end())
341 break;
342 if (!MBBI1->isIdenticalTo(*MBBI2) ||
343 // FIXME: This check is dubious. It's used to get around a problem where
344 // people incorrectly expect inline asm directives to remain in the same
345 // relative order. This is untenable because normal compiler
346 // optimizations (like this one) may reorder and/or merge these
347 // directives.
348 MBBI1->isInlineAsm()) {
349 break;
350 }
351 if (MBBI1->getFlag(MachineInstr::NoMerge) ||
352 MBBI2->getFlag(MachineInstr::NoMerge))
353 break;
354 ++TailLen;
355 I1 = MBBI1;
356 I2 = MBBI2;
357 }
358
359 return TailLen;
360}
361
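/// Replace the tail of OldInst's block, starting at OldInst, with an
/// unconditional branch to NewDest, inserting IMPLICIT_DEFs in the shortened
/// block for any NewDest live-ins that would otherwise be left without a
/// definition.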
362void BranchFolder::replaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
363 MachineBasicBlock &NewDest) {
364 if (UpdateLiveIns) {
365 // OldInst should always point to an instruction.
366 MachineBasicBlock &OldMBB = *OldInst->getParent();
367 LiveRegs.clear();
368 LiveRegs.addLiveOuts(OldMBB);
369 // Move backward to the place where we will insert the jump.
370 MachineBasicBlock::iterator I = OldMBB.end();
371 do {
372 --I;
373 LiveRegs.stepBackward(*I);
374 } while (I != OldInst);
375
376 // Merging the tails may have switched some undef operands to non-undef ones.
377 // Add IMPLICIT_DEFS into OldMBB as necessary to have a definition of the
378 // register.
379 for (const MachineBasicBlock::RegisterMaskPair &P : NewDest.liveins()) {
380 // We computed the liveins with computeLiveIn earlier and should only see
381 // full registers:
382 assert(P.LaneMask == LaneBitmask::getAll() &&
383 "Can only handle full register.");
384 MCPhysReg Reg = P.PhysReg;
385 if (!LiveRegs.available(*MRI, Reg))
386 continue;
387 DebugLoc DL;
388 BuildMI(OldMBB, OldInst, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Reg);
389 }
390 }
391
392 TII->ReplaceTailWithBranchTo(OldInst, &NewDest);
393 ++NumTailMerge;
394}
395
396MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
397 MachineBasicBlock::iterator BBI1,
398 const BasicBlock *BB) {
399 if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
400 return nullptr;
401
402 MachineFunction &MF = *CurMBB.getParent();
403
404 // Create the fall-through block.
405 MachineFunction::iterator MBBI = CurMBB.getIterator();
406 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(BB);
407 CurMBB.getParent()->insert(++MBBI, NewMBB);
408
409 // Move all the successors of this block to the specified block.
410 NewMBB->transferSuccessors(&CurMBB);
411
412 // Add an edge from CurMBB to NewMBB for the fall-through.
413 CurMBB.addSuccessor(NewMBB);
414
415 // Splice the code over.
416 NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());
417
418 // NewMBB belongs to the same loop as CurMBB.
419 if (MLI)
420 if (MachineLoop *ML = MLI->getLoopFor(&CurMBB))
421 ML->addBasicBlockToLoop(NewMBB, *MLI);
422
423 // NewMBB inherits CurMBB's block frequency.
424 MBBFreqInfo.setBlockFreq(NewMBB, MBBFreqInfo.getBlockFreq(&CurMBB));
425
426 if (UpdateLiveIns)
427 computeAndAddLiveIns(LiveRegs, *NewMBB);
428
429 // Add the new block to the EH scope.
430 const auto &EHScopeI = EHScopeMembership.find(&CurMBB);
431 if (EHScopeI != EHScopeMembership.end()) {
432 auto n = EHScopeI->second;
433 EHScopeMembership[NewMBB] = n;
434 }
435
436 return NewMBB;
437}
438
439/// EstimateRuntime - Make a rough estimate for how long it will take to run
440/// the specified code.
441static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
442 MachineBasicBlock::iterator E) {
443 unsigned Time = 0;
444 for (; I != E; ++I) {
445 if (!countsAsInstruction(*I))
446 continue;
447 if (I->isCall())
448 Time += 10;
449 else if (I->mayLoadOrStore())
450 Time += 2;
451 else
452 ++Time;
453 }
454 return Time;
455}
456
457// CurMBB needs to add an unconditional branch to SuccMBB (we removed these
458// branches temporarily for tail merging). In the case where CurMBB ends
459// with a conditional branch to the next block, optimize by reversing the
460// test and conditionally branching to SuccMBB instead.
461static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
462 const TargetInstrInfo *TII, const DebugLoc &BranchDL) {
463 MachineFunction *MF = CurMBB->getParent();
464 MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
465 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
466 SmallVector<MachineOperand, 4> Cond;
467 DebugLoc dl = CurMBB->findBranchDebugLoc();
468 if (!dl)
469 dl = BranchDL;
470 if (I != MF->end() && !TII->analyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
471 MachineBasicBlock *NextBB = &*I;
472 if (TBB == NextBB && !Cond.empty() && !FBB) {
473 if (!TII->reverseBranchCondition(Cond)) {
474 TII->removeBranch(*CurMBB);
475 TII->insertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
476 return;
477 }
478 }
479 }
480 TII->insertBranch(*CurMBB, SuccBB, nullptr,
481 SmallVector<MachineOperand, 0>(), dl);
482}
483
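/// Order MergePotentials entries by hash value and then by block number, so
/// that array_pod_sort groups blocks with identical tail hashes together in a
/// deterministic order.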
484bool
485BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
486 if (getHash() < o.getHash())
487 return true;
488 if (getHash() > o.getHash())
489 return false;
490 if (getBlock()->getNumber() < o.getBlock()->getNumber())
491 return true;
492 if (getBlock()->getNumber() > o.getBlock()->getNumber())
493 return false;
494 return false;
495}
496
497/// CountTerminators - Count the number of terminators in the given
498/// block and set I to the position of the first non-terminator, if there
499/// is one, or MBB->end() otherwise.
500static unsigned CountTerminators(MachineBasicBlock *MBB,
501 MachineBasicBlock::iterator &I) {
502 I = MBB->end();
503 unsigned NumTerms = 0;
504 while (true) {
505 if (I == MBB->begin()) {
506 I = MBB->end();
507 break;
508 }
509 --I;
510 if (!I->isTerminator()) break;
511 ++NumTerms;
512 }
513 return NumTerms;
514}
515
516/// A no successor, non-return block probably ends in unreachable and is cold.
517/// Also consider a block that ends in an indirect branch to be a return block,
518/// since many targets use plain indirect branches to return.
519static bool blockEndsInUnreachable(const MachineBasicBlock *MBB) {
520 if (!MBB->succ_empty())
521 return false;
522 if (MBB->empty())
523 return true;
524 return !(MBB->back().isReturn() || MBB->back().isIndirectBranch());
525}
526
527/// ProfitableToMerge - Check if two machine basic blocks have a common tail
528/// and decide if it would be profitable to merge those tails. Return the
529/// length of the common tail and iterators to the first common instruction
530/// in each block.
531/// MBB1, MBB2 The blocks to check
532/// MinCommonTailLength Minimum size of tail block to be merged.
533/// CommonTailLen Out parameter to record the size of the shared tail between
534/// MBB1 and MBB2
535/// I1, I2 Iterator references that will be changed to point to the first
536/// instruction in the common tail shared by MBB1,MBB2
537/// SuccBB A common successor of MBB1, MBB2 which are in a canonical form
538/// relative to SuccBB
539/// PredBB The layout predecessor of SuccBB, if any.
540/// EHScopeMembership map from block to EH scope #.
541/// AfterPlacement True if we are merging blocks after layout. Stricter
542/// thresholds apply to prevent undoing tail-duplication.
543static bool
544ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
545 unsigned MinCommonTailLength, unsigned &CommonTailLen,
546 MachineBasicBlock::iterator &I1, MachineBasicBlock::iterator &I2,
547 MachineBasicBlock *SuccBB,
548 MachineBasicBlock *PredBB,
549 const DenseMap<const MachineBasicBlock *, int> &EHScopeMembership,
550 bool AfterPlacement,
551 MBFIWrapper &MBBFreqInfo,
552 ProfileSummaryInfo *PSI) {
553 // It is never profitable to tail-merge blocks from two different EH scopes.
554 if (!EHScopeMembership.empty()) {
555 auto EHScope1 = EHScopeMembership.find(MBB1);
556 assert(EHScope1 != EHScopeMembership.end());
557 auto EHScope2 = EHScopeMembership.find(MBB2);
558 assert(EHScope2 != EHScopeMembership.end());
559 if (EHScope1->second != EHScope2->second)
560 return false;
561 }
562
563 CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
564 if (CommonTailLen == 0)
565 return false;
566 LLVM_DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1)
567 << " and " << printMBBReference(*MBB2) << " is "
568 << CommonTailLen << '\n');
569
570 // Move the iterators to the beginning of the MBB if we only got debug
571 // instructions before the tail. This is to avoid splitting a block when we
572 // only got debug instructions before the tail (to be invariant on -g).
573 if (skipDebugInstructionsForward(MBB1->begin(), MBB1->end(), false) == I1)
574 I1 = MBB1->begin();
575 if (skipDebugInstructionsForward(MBB2->begin(), MBB2->end(), false) == I2)
576 I2 = MBB2->begin();
577
578 bool FullBlockTail1 = I1 == MBB1->begin();
579 bool FullBlockTail2 = I2 == MBB2->begin();
580
581 // It's almost always profitable to merge any number of non-terminator
582 // instructions with the block that falls through into the common successor.
583 // This is true only for a single successor. For multiple successors, we are
584 // trading a conditional branch for an unconditional one.
585 // TODO: Re-visit successor size for non-layout tail merging.
586 if ((MBB1 == PredBB || MBB2 == PredBB) &&
587 (!AfterPlacement || MBB1->succ_size() == 1)) {
588 MachineBasicBlock::iterator I;
589 unsigned NumTerms = CountTerminators(MBB1 == PredBB ? MBB2 : MBB1, I);
590 if (CommonTailLen > NumTerms)
591 return true;
592 }
593
594 // If these are identical non-return blocks with no successors, merge them.
595 // Such blocks are typically cold calls to noreturn functions like abort, and
596 // are unlikely to become a fallthrough target after machine block placement.
597 // Tail merging these blocks is unlikely to create additional unconditional
598 // branches, and will reduce the size of this cold code.
599 if (FullBlockTail1 && FullBlockTail2 &&
600 blockEndsInUnreachable(MBB1) == blockEndsInUnreachable(MBB2))
601 return true;
602
603 // If one of the blocks can be completely merged and happens to be in
604 // a position where the other could fall through into it, merge any number
605 // of instructions, because it can be done without a branch.
606 // TODO: If the blocks are not adjacent, move one of them so that they are?
607 if (MBB1->isLayoutSuccessor(MBB2) && FullBlockTail2)
608 return true;
609 if (MBB2->isLayoutSuccessor(MBB1) && FullBlockTail1)
610 return true;
611
612 // If both blocks are identical and end in a branch, merge them unless they
613 // both have a fallthrough predecessor and successor.
614 // We can only do this after block placement because it depends on whether
615 // there are fallthroughs, and we don't know until after layout.
616 if (AfterPlacement && FullBlockTail1 && FullBlockTail2) {
617 auto BothFallThrough = [](MachineBasicBlock *MBB) {
618 if (!MBB->succ_empty() && !MBB->canFallThrough())
619 return false;
620 MachineFunction::iterator I(MBB);
621 MachineFunction *MF = MBB->getParent();
622 return (MBB != &*MF->begin()) && std::prev(I)->canFallThrough();
623 };
624 if (!BothFallThrough(MBB1) || !BothFallThrough(MBB2))
625 return true;
626 }
627
628 // If both blocks have an unconditional branch temporarily stripped out,
629 // count that as an additional common instruction for the following
630 // heuristics. This heuristic is only accurate for single-succ blocks, so to
631 // make sure that during layout merging and duplicating don't crash, we check
632 // for that when merging during layout.
633 unsigned EffectiveTailLen = CommonTailLen;
634 if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
635 (MBB1->succ_size() == 1 || !AfterPlacement) &&
636 !MBB1->back().isBarrier() &&
637 !MBB2->back().isBarrier())
638 ++EffectiveTailLen;
639
640 // Check if the common tail is long enough to be worthwhile.
641 if (EffectiveTailLen >= MinCommonTailLength)
642 return true;
643
644 // If we are optimizing for code size, 2 instructions in common is enough if
645 // we don't have to split a block. At worst we will be introducing 1 new
646 // branch instruction, which is likely to be smaller than the 2
647 // instructions that would be deleted in the merge.
648 MachineFunction *MF = MBB1->getParent();
649 bool OptForSize =
650 MF->getFunction().hasOptSize() ||
651 (llvm::shouldOptimizeForSize(MBB1, PSI, &MBBFreqInfo) &&
652 llvm::shouldOptimizeForSize(MBB2, PSI, &MBBFreqInfo));
653 return EffectiveTailLen >= 2 && OptForSize &&
654 (FullBlockTail1 || FullBlockTail2);
655}
656
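/// Scan MergePotentials for entries whose hash equals CurHash and fill
/// SameTails with the blocks that share the longest profitable common tail of
/// at least MinCommonTailLength instructions; returns that maximum tail
/// length.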
657unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
658 unsigned MinCommonTailLength,
659 MachineBasicBlock *SuccBB,
660 MachineBasicBlock *PredBB) {
661 unsigned maxCommonTailLength = 0U;
662 SameTails.clear();
663 MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
664 MPIterator HighestMPIter = std::prev(MergePotentials.end());
665 for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
666 B = MergePotentials.begin();
667 CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
668 for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
669 unsigned CommonTailLen;
670 if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
671 MinCommonTailLength,
672 CommonTailLen, TrialBBI1, TrialBBI2,
673 SuccBB, PredBB,
674 EHScopeMembership,
675 AfterBlockPlacement, MBBFreqInfo, PSI)) {
676 if (CommonTailLen > maxCommonTailLength) {
677 SameTails.clear();
678 maxCommonTailLength = CommonTailLen;
679 HighestMPIter = CurMPIter;
680 SameTails.push_back(SameTailElt(CurMPIter, TrialBBI1));
681 }
682 if (HighestMPIter == CurMPIter &&
683 CommonTailLen == maxCommonTailLength)
684 SameTails.push_back(SameTailElt(I, TrialBBI2));
685 }
686 if (I == B)
687 break;
688 }
689 }
690 return maxCommonTailLength;
691}
692
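/// Remove all entries with hash CurHash from MergePotentials, restoring the
/// unconditional branch to SuccBB (where one is needed) that was stripped out
/// while putting the predecessors into canonical form.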
693void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
694 MachineBasicBlock *SuccBB,
695 MachineBasicBlock *PredBB,
696 const DebugLoc &BranchDL) {
697 MPIterator CurMPIter, B;
698 for (CurMPIter = std::prev(MergePotentials.end()),
699 B = MergePotentials.begin();
700 CurMPIter->getHash() == CurHash; --CurMPIter) {
701 // Put the unconditional branch back, if we need one.
702 MachineBasicBlock *CurMBB = CurMPIter->getBlock();
703 if (SuccBB && CurMBB != PredBB)
704 FixTail(CurMBB, SuccBB, TII, BranchDL);
705 if (CurMPIter == B)
706 break;
707 }
708 if (CurMPIter->getHash() != CurHash)
709 CurMPIter++;
710 MergePotentials.erase(CurMPIter, MergePotentials.end());
711}
712
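/// Pick the block in SameTails that is cheapest to split (preferring PredBB,
/// which needs no new branch) and split it so that the chosen entry refers to
/// a block consisting only of the common tail; returns false if the split is
/// not legal.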
713bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
714 MachineBasicBlock *SuccBB,
715 unsigned maxCommonTailLength,
716 unsigned &commonTailIndex) {
717 commonTailIndex = 0;
718 unsigned TimeEstimate = ~0U;
719 for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
720 // Use PredBB if possible; that doesn't require a new branch.
721 if (SameTails[i].getBlock() == PredBB) {
722 commonTailIndex = i;
723 break;
724 }
725 // Otherwise, make a (fairly bogus) choice based on estimate of
726 // how long it will take the various blocks to execute.
727 unsigned t = EstimateRuntime(SameTails[i].getBlock()->begin(),
728 SameTails[i].getTailStartPos());
729 if (t <= TimeEstimate) {
730 TimeEstimate = t;
731 commonTailIndex = i;
732 }
733 }
734
735 MachineBasicBlock::iterator BBI =
736 SameTails[commonTailIndex].getTailStartPos();
737 MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
738
739 LLVM_DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size "
740 << maxCommonTailLength);
741
742 // If the split block unconditionally falls-thru to SuccBB, it will be
743 // merged. In control flow terms it should then take SuccBB's name. e.g. If
744 // SuccBB is an inner loop, the common tail is still part of the inner loop.
745 const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ?
746 SuccBB->getBasicBlock() : MBB->getBasicBlock();
747 MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
748 if (!newMBB) {
749 LLVM_DEBUG(dbgs() << "... failed!");
750 return false;
751 }
752
753 SameTails[commonTailIndex].setBlock(newMBB);
754 SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
755
756 // If we split PredBB, newMBB is the new predecessor.
757 if (PredBB == MBB)
758 PredBB = newMBB;
759
760 return true;
761}
762
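/// Walk the tail of MBBIStartPos's block in parallel with the common tail
/// block, merging memory operands into the common instructions and clearing
/// undef flags that do not hold in both copies.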
763static void
764mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
765 MachineBasicBlock &MBBCommon) {
766 MachineBasicBlock *MBB = MBBIStartPos->getParent();
767 // Note that CommonTailLen does not necessarily match the size of the
768 // common BB or the number of its instructions, because the blocks may
769 // differ in their debug instructions.
770 unsigned CommonTailLen = 0;
771 for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos)
772 ++CommonTailLen;
773
774 MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin();
775 MachineBasicBlock::reverse_iterator MBBIE = MBB->rend();
776 MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin();
777 MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend();
778
779 while (CommonTailLen--) {
780 assert(MBBI != MBBIE && "Reached BB end within common tail length!");
781 (void)MBBIE;
782
783 if (!countsAsInstruction(*MBBI)) {
784 ++MBBI;
785 continue;
786 }
787
788 while ((MBBICommon != MBBIECommon) && !countsAsInstruction(*MBBICommon))
789 ++MBBICommon;
790
791 assert(MBBICommon != MBBIECommon &&
792 "Reached BB end within common tail length!");
793 assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");
794
795 // Merge MMOs from memory operations in the common block.
796 if (MBBICommon->mayLoadOrStore())
797 MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
798 // Drop undef flags if they aren't present in all merged instructions.
799 for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
800 MachineOperand &MO = MBBICommon->getOperand(I);
801 if (MO.isReg() && MO.isUndef()) {
802 const MachineOperand &OtherMO = MBBI->getOperand(I);
803 if (!OtherMO.isUndef())
804 MO.setIsUndef(false);
805 }
806 }
807
808 ++MBBI;
809 ++MBBICommon;
810 }
811}
812
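/// Merge debug locations, memory operands and undef flags from every block in
/// SameTails into the instructions of the common tail block, then recompute
/// the common tail's live-ins.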
813void BranchFolder::mergeCommonTails(unsigned commonTailIndex) {
814 MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
815
816 std::vector<MachineBasicBlock::iterator> NextCommonInsts(SameTails.size());
817 for (unsigned int i = 0 ; i != SameTails.size() ; ++i) {
818 if (i != commonTailIndex) {
819 NextCommonInsts[i] = SameTails[i].getTailStartPos();
820 mergeOperations(SameTails[i].getTailStartPos(), *MBB);
821 } else {
822 assert(SameTails[i].getTailStartPos() == MBB->begin() &&
823 "MBB is not a common tail only block");
824 }
825 }
826
827 for (auto &MI : *MBB) {
828 if (!countsAsInstruction(MI))
829 continue;
830 DebugLoc DL = MI.getDebugLoc();
831 for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) {
832 if (i == commonTailIndex)
833 continue;
834
835 auto &Pos = NextCommonInsts[i];
836 assert(Pos != SameTails[i].getBlock()->end() &&
837 "Reached BB end within common tail");
838 while (!countsAsInstruction(*Pos)) {
839 ++Pos;
840 assert(Pos != SameTails[i].getBlock()->end() &&
841 "Reached BB end within common tail");
842 }
843 assert(MI.isIdenticalTo(*Pos) && "Expected matching MIIs!");
844 DL = DILocation::getMergedLocation(DL, Pos->getDebugLoc());
845 NextCommonInsts[i] = ++Pos;
846 }
847 MI.setDebugLoc(DL);
848 }
849
850 if (UpdateLiveIns) {
851 LivePhysRegs NewLiveIns(*TRI);
852 computeLiveIns(NewLiveIns, *MBB);
853 LiveRegs.init(*TRI);
854
855 // The flag merging may lead to some register uses no longer using the
856 // <undef> flag, add IMPLICIT_DEFs in the predecessors as necessary.
857 for (MachineBasicBlock *Pred : MBB->predecessors()) {
858 LiveRegs.clear();
859 LiveRegs.addLiveOuts(*Pred);
860 MachineBasicBlock::iterator InsertBefore = Pred->getFirstTerminator();
861 for (Register Reg : NewLiveIns) {
862 if (!LiveRegs.available(*MRI, Reg))
863 continue;
864
865 // Skip the register if we are about to add one of its super registers.
866 // TODO: Common this up with the same logic in addLiveIns().
867 if (any_of(TRI->superregs(Reg), [&](MCPhysReg SReg) {
868 return NewLiveIns.contains(SReg) && !MRI->isReserved(SReg);
869 }))
870 continue;
871
872 DebugLoc DL;
873 BuildMI(*Pred, InsertBefore, DL, TII->get(TargetOpcode::IMPLICIT_DEF),
874 Reg);
875 }
876 }
877
878 MBB->clearLiveIns();
879 addLiveIns(*MBB, NewLiveIns);
880 }
881}
882
883// See if any of the blocks in MergePotentials (which all have SuccBB as a
884// successor, or all have no successor if it is null) can be tail-merged.
885// If there is a successor, any blocks in MergePotentials that are not
886// tail-merged and are not immediately before Succ must have an unconditional
887// branch to Succ added (but the predecessor/successor lists need no
888// adjustment). The lone predecessor of Succ that falls through into Succ,
889// if any, is given in PredBB.
890// MinCommonTailLength - Except for the special cases below, tail-merge if
891// there are at least this many instructions in common.
892bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
893 MachineBasicBlock *PredBB,
894 unsigned MinCommonTailLength) {
895 bool MadeChange = false;
896
898 dbgs() << "\nTryTailMergeBlocks: ";
899 for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs()
900 << printMBBReference(*MergePotentials[i].getBlock())
901 << (i == e - 1 ? "" : ", ");
902 dbgs() << "\n"; if (SuccBB) {
903 dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n';
904 if (PredBB)
905 dbgs() << " which has fall-through from "
906 << printMBBReference(*PredBB) << "\n";
907 } dbgs() << "Looking for common tails of at least "
908 << MinCommonTailLength << " instruction"
909 << (MinCommonTailLength == 1 ? "" : "s") << '\n';);
910
911 // Sort by hash value so that blocks with identical end sequences sort
912 // together.
913 array_pod_sort(MergePotentials.begin(), MergePotentials.end());
914
915 // Walk through equivalence sets looking for actual exact matches.
916 while (MergePotentials.size() > 1) {
917 unsigned CurHash = MergePotentials.back().getHash();
918 const DebugLoc &BranchDL = MergePotentials.back().getBranchDebugLoc();
919
920 // Build SameTails, identifying the set of blocks with this hash code
921 // and with the maximum number of instructions in common.
922 unsigned maxCommonTailLength = ComputeSameTails(CurHash,
923 MinCommonTailLength,
924 SuccBB, PredBB);
925
926 // If we didn't find any pair that has at least MinCommonTailLength
927 // instructions in common, remove all blocks with this hash code and retry.
928 if (SameTails.empty()) {
929 RemoveBlocksWithHash(CurHash, SuccBB, PredBB, BranchDL);
930 continue;
931 }
932
933 // If one of the blocks is the entire common tail (and is not the entry
934 // block/an EH pad, which we can't jump to), we can treat all blocks with
935 // this same tail at once. Use PredBB if that is one of the possibilities,
936 // as that will not introduce any extra branches.
937 MachineBasicBlock *EntryBB =
938 &MergePotentials.front().getBlock()->getParent()->front();
939 unsigned commonTailIndex = SameTails.size();
940 // If there are two blocks, check to see if one can be made to fall through
941 // into the other.
942 if (SameTails.size() == 2 &&
943 SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
944 SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad())
945 commonTailIndex = 1;
946 else if (SameTails.size() == 2 &&
947 SameTails[1].getBlock()->isLayoutSuccessor(
948 SameTails[0].getBlock()) &&
949 SameTails[0].tailIsWholeBlock() &&
950 !SameTails[0].getBlock()->isEHPad())
951 commonTailIndex = 0;
952 else {
953 // Otherwise just pick one, favoring the fall-through predecessor if
954 // there is one.
955 for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
956 MachineBasicBlock *MBB = SameTails[i].getBlock();
957 if ((MBB == EntryBB || MBB->isEHPad()) &&
958 SameTails[i].tailIsWholeBlock())
959 continue;
960 if (MBB == PredBB) {
961 commonTailIndex = i;
962 break;
963 }
964 if (SameTails[i].tailIsWholeBlock())
965 commonTailIndex = i;
966 }
967 }
968
969 if (commonTailIndex == SameTails.size() ||
970 (SameTails[commonTailIndex].getBlock() == PredBB &&
971 !SameTails[commonTailIndex].tailIsWholeBlock())) {
972 // None of the blocks consist entirely of the common tail.
973 // Split a block so that one does.
974 if (!CreateCommonTailOnlyBlock(PredBB, SuccBB,
975 maxCommonTailLength, commonTailIndex)) {
976 RemoveBlocksWithHash(CurHash, SuccBB, PredBB, BranchDL);
977 continue;
978 }
979 }
980
981 MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
982
983 // Recompute common tail MBB's edge weights and block frequency.
984 setCommonTailEdgeWeights(*MBB);
985
986 // Merge debug locations, MMOs and undef flags across identical instructions
987 // for common tail.
988 mergeCommonTails(commonTailIndex);
989
990 // MBB is common tail. Adjust all other BB's to jump to this one.
991 // Traversal must be forwards so erases work.
992 LLVM_DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB)
993 << " for ");
994 for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
995 if (commonTailIndex == i)
996 continue;
997 LLVM_DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock())
998 << (i == e - 1 ? "" : ", "));
999 // Hack the end off BB i, making it jump to BB commonTailIndex instead.
1000 replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB);
1001 // BB i is no longer a predecessor of SuccBB; remove it from the worklist.
1002 MergePotentials.erase(SameTails[i].getMPIter());
1003 }
1004 LLVM_DEBUG(dbgs() << "\n");
1005 // We leave commonTailIndex in the worklist in case there are other blocks
1006 // that match it with a smaller number of instructions.
1007 MadeChange = true;
1008 }
1009 return MadeChange;
1010}
1011
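/// Driver for tail merging across the function: first try to merge blocks
/// with no successors, then, for each block with multiple predecessors, put
/// its predecessors into canonical form and try to merge their common tails.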
1012bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
1013 bool MadeChange = false;
1014 if (!EnableTailMerge)
1015 return MadeChange;
1016
1017 // First find blocks with no successors.
1018 // Block placement may create new tail merging opportunities for these blocks.
1019 MergePotentials.clear();
1020 for (MachineBasicBlock &MBB : MF) {
1021 if (MergePotentials.size() == TailMergeThreshold)
1022 break;
1023 if (!TriedMerging.count(&MBB) && MBB.succ_empty())
1024 MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB,
1025 MBB.findBranchDebugLoc()));
1026 }
1027
1028 // If this is a large problem, avoid visiting the same basic blocks
1029 // multiple times.
1030 if (MergePotentials.size() == TailMergeThreshold)
1031 for (const MergePotentialsElt &Elt : MergePotentials)
1032 TriedMerging.insert(Elt.getBlock());
1033
1034 // See if we can do any tail merging on those.
1035 if (MergePotentials.size() >= 2)
1036 MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
1037
1038 // Look at blocks (IBB) with multiple predecessors (PBB).
1039 // We change each predecessor to a canonical form, by
1040 // (1) temporarily removing any unconditional branch from the predecessor
1041 // to IBB, and
1042 // (2) alter conditional branches so they branch to the other block
1043 // not IBB; this may require adding back an unconditional branch to IBB
1044 // later, where there wasn't one coming in. E.g.
1045 // Bcc IBB
1046 // fallthrough to QBB
1047 // here becomes
1048 // Bncc QBB
1049 // with a conceptual B to IBB after that, which never actually exists.
1050 // With those changes, we see whether the predecessors' tails match,
1051 // and merge them if so. We change things out of canonical form and
1052 // back to the way they were later in the process. (OptimizeBranches
1053 // would undo some of this, but we can't use it, because we'd get into
1054 // a compile-time infinite loop repeatedly doing and undoing the same
1055 // transformations.)
1056
1057 for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
1058 I != E; ++I) {
1059 if (I->pred_size() < 2) continue;
1060 SmallSetVector<MachineBasicBlock *, 8> UniquePreds;
1061 MachineBasicBlock *IBB = &*I;
1062 MachineBasicBlock *PredBB = &*std::prev(I);
1063 MergePotentials.clear();
1064 MachineLoop *ML;
1065
1066 // Bail if merging after placement and IBB is the loop header because
1067 // -- If merging predecessors that belong to the same loop as IBB, the
1068 // common tail of merged predecessors may become the loop top if block
1069 // placement is called again and the predecessors may branch to this common
1070 // tail and require more branches. This can be relaxed if
1071 // MachineBlockPlacement::findBestLoopTop is more flexible.
1072 // --If merging predecessors that do not belong to the same loop as IBB, the
1073 // loop info of IBB's loop and the other loops may be affected. Calling the
1074 // block placement again may make big change to the layout and eliminate the
1075 // reason to do tail merging here.
1076 if (AfterBlockPlacement && MLI) {
1077 ML = MLI->getLoopFor(IBB);
1078 if (ML && IBB == ML->getHeader())
1079 continue;
1080 }
1081
1082 for (MachineBasicBlock *PBB : I->predecessors()) {
1083 if (MergePotentials.size() == TailMergeThreshold)
1084 break;
1085
1086 if (TriedMerging.count(PBB))
1087 continue;
1088
1089 // Skip blocks that loop to themselves, can't tail merge these.
1090 if (PBB == IBB)
1091 continue;
1092
1093 // Visit each predecessor only once.
1094 if (!UniquePreds.insert(PBB).second)
1095 continue;
1096
1097 // Skip blocks which may jump to a landing pad or jump from an asm blob.
1098 // Can't tail merge these.
1099 if (PBB->hasEHPadSuccessor() || PBB->mayHaveInlineAsmBr())
1100 continue;
1101
1102 // After block placement, only consider predecessors that belong to the
1103 // same loop as IBB. The reason is the same as above when skipping loop
1104 // header.
1105 if (AfterBlockPlacement && MLI)
1106 if (ML != MLI->getLoopFor(PBB))
1107 continue;
1108
1109 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1110 SmallVector<MachineOperand, 4> Cond;
1111 if (!TII->analyzeBranch(*PBB, TBB, FBB, Cond, true)) {
1112 // Failing case: IBB is the target of a cbr, and we cannot reverse the
1113 // branch.
1114 SmallVector<MachineOperand, 4> NewCond(Cond);
1115 if (!Cond.empty() && TBB == IBB) {
1116 if (TII->reverseBranchCondition(NewCond))
1117 continue;
1118 // This is the QBB case described above
1119 if (!FBB) {
1120 auto Next = ++PBB->getIterator();
1121 if (Next != MF.end())
1122 FBB = &*Next;
1123 }
1124 }
1125
1126 // Remove the unconditional branch at the end, if any.
1127 DebugLoc dl = PBB->findBranchDebugLoc();
1128 if (TBB && (Cond.empty() || FBB)) {
1129 TII->removeBranch(*PBB);
1130 if (!Cond.empty())
1131 // reinsert conditional branch only, for now
1132 TII->insertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
1133 NewCond, dl);
1134 }
1135
1136 MergePotentials.push_back(
1137 MergePotentialsElt(HashEndOfMBB(*PBB), PBB, dl));
1138 }
1139 }
1140
1141 // If this is a large problem, avoid visiting the same basic blocks multiple
1142 // times.
1143 if (MergePotentials.size() == TailMergeThreshold)
1144 for (MergePotentialsElt &Elt : MergePotentials)
1145 TriedMerging.insert(Elt.getBlock());
1146
1147 if (MergePotentials.size() >= 2)
1148 MadeChange |= TryTailMergeBlocks(IBB, PredBB, MinCommonTailLength);
1149
1150 // Reinsert an unconditional branch if needed. The 1 below can occur as a
1151 // result of removing blocks in TryTailMergeBlocks.
1152 PredBB = &*std::prev(I); // this may have been changed in TryTailMergeBlocks
1153 if (MergePotentials.size() == 1 &&
1154 MergePotentials.begin()->getBlock() != PredBB)
1155 FixTail(MergePotentials.begin()->getBlock(), IBB, TII,
1156 MergePotentials.begin()->getBranchDebugLoc());
1157 }
1158
1159 return MadeChange;
1160}
1161
1162void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) {
1163 SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size());
1164 BlockFrequency AccumulatedMBBFreq;
1165
1166 // Aggregate edge frequency of successor edge j:
1167 // edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)),
1168 // where bb is a basic block that is in SameTails.
1169 for (const auto &Src : SameTails) {
1170 const MachineBasicBlock *SrcMBB = Src.getBlock();
1171 BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(SrcMBB);
1172 AccumulatedMBBFreq += BlockFreq;
1173
1174 // It is not necessary to recompute edge weights if TailBB has less than two
1175 // successors.
1176 if (TailMBB.succ_size() <= 1)
1177 continue;
1178
1179 auto EdgeFreq = EdgeFreqLs.begin();
1180
1181 for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1182 SuccI != SuccE; ++SuccI, ++EdgeFreq)
1183 *EdgeFreq += BlockFreq * MBPI.getEdgeProbability(SrcMBB, *SuccI);
1184 }
1185
1186 MBBFreqInfo.setBlockFreq(&TailMBB, AccumulatedMBBFreq);
1187
1188 if (TailMBB.succ_size() <= 1)
1189 return;
1190
1191 auto SumEdgeFreq =
1192 std::accumulate(EdgeFreqLs.begin(), EdgeFreqLs.end(), BlockFrequency(0))
1193 .getFrequency();
1194 auto EdgeFreq = EdgeFreqLs.begin();
1195
1196 if (SumEdgeFreq > 0) {
1197 for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1198 SuccI != SuccE; ++SuccI, ++EdgeFreq) {
1199 auto Prob = BranchProbability::getBranchProbability(
1200 EdgeFreq->getFrequency(), SumEdgeFreq);
1201 TailMBB.setSuccProbability(SuccI, Prob);
1202 }
1203 }
1204}
1205
1206//===----------------------------------------------------------------------===//
1207// Branch Optimization
1208//===----------------------------------------------------------------------===//
1209
1210bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
1211 bool MadeChange = false;
1212
1213 // Make sure blocks are numbered in order
1214 MF.RenumberBlocks();
1215 // Renumbering blocks alters EH scope membership, recalculate it.
1216 EHScopeMembership = getEHScopeMembership(MF);
1217
1218 for (MachineBasicBlock &MBB :
1219 llvm::make_early_inc_range(llvm::drop_begin(MF))) {
1220 MadeChange |= OptimizeBlock(&MBB);
1221
1222 // If it is dead, remove it.
1223 if (MBB.pred_empty()) {
1224 RemoveDeadBlock(&MBB);
1225 MadeChange = true;
1226 ++NumDeadBlocks;
1227 }
1228 }
1229
1230 return MadeChange;
1231}
1232
1233// Blocks should be considered empty if they contain only debug info;
1234// else the debug info would affect codegen.
1235static bool IsEmptyBlock(MachineBasicBlock *MBB) {
1236 return MBB->getFirstNonDebugInstr(true) == MBB->end();
1237}
1238
1239// Blocks with only debug info and branches should be considered the same
1240// as blocks with only branches.
1241static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
1242 MachineBasicBlock::iterator I = MBB->getFirstNonDebugInstr();
1243 assert(I != MBB->end() && "empty block!");
1244 return I->isBranch();
1245}
1246
1247/// IsBetterFallthrough - Return true if it would be clearly better to
1248/// fall-through to MBB1 than to fall through into MBB2. This has to return
1249/// a strict ordering, returning true for both (MBB1,MBB2) and (MBB2,MBB1) will
1250/// result in infinite loops.
1251static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
1252 MachineBasicBlock *MBB2) {
1253 assert(MBB1 && MBB2 && "Unknown MachineBasicBlock");
1254
1255 // Right now, we use a simple heuristic. If MBB2 ends with a call, and
1256 // MBB1 doesn't, we prefer to fall through into MBB1. This allows us to
1257 // optimize branches that branch to either a return block or an assert block
1258 // into a fallthrough to the return.
1259 MachineBasicBlock::iterator MBB1I = MBB1->getLastNonDebugInstr();
1260 MachineBasicBlock::iterator MBB2I = MBB2->getLastNonDebugInstr();
1261 if (MBB1I == MBB1->end() || MBB2I == MBB2->end())
1262 return false;
1263
1264 // If there is a clear successor ordering we make sure that one block
1265 // will fall through to the next
1266 if (MBB1->isSuccessor(MBB2)) return true;
1267 if (MBB2->isSuccessor(MBB1)) return false;
1268
1269 return MBB2I->isCall() && !MBB1I->isCall();
1270}
1271
1272/// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch
1273/// instructions on the block.
1274static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) {
1275 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
1276 if (I != MBB.end() && I->isBranch())
1277 return I->getDebugLoc();
1278 return DebugLoc();
1279}
1280
1281static void copyDebugInfoToPredecessor(const TargetInstrInfo *TII,
1282 MachineBasicBlock &MBB,
1283 MachineBasicBlock &PredMBB) {
1284 auto InsertBefore = PredMBB.getFirstTerminator();
1285 for (MachineInstr &MI : MBB.instrs())
1286 if (MI.isDebugInstr()) {
1287 TII->duplicate(PredMBB, InsertBefore, MI);
1288 LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to pred: "
1289 << MI);
1290 }
1291}
1292
1293static void copyDebugInfoToSuccessor(const TargetInstrInfo *TII,
1294 MachineBasicBlock &MBB,
1295 MachineBasicBlock &SuccMBB) {
1296 auto InsertBefore = SuccMBB.SkipPHIsAndLabels(SuccMBB.begin());
1297 for (MachineInstr &MI : MBB.instrs())
1298 if (MI.isDebugInstr()) {
1299 TII->duplicate(SuccMBB, InsertBefore, MI);
1300 LLVM_DEBUG(dbgs() << "Copied debug entity from empty block to succ: "
1301 << MI);
1302 }
1303}
1304
1305// Try to salvage DBG_VALUE instructions from an otherwise empty block. If such
1306// a basic block is removed we would lose the debug information unless we have
1307// copied the information to a predecessor/successor.
1308//
1309// TODO: This function only handles some simple cases. An alternative would be
1310// to run a heavier analysis, such as the LiveDebugValues pass, before we do
1311// branch folding.
1312static void salvageDebugInfoFromEmptyBlock(const TargetInstrInfo *TII,
1313 MachineBasicBlock &MBB) {
1314 assert(IsEmptyBlock(&MBB) && "Expected an empty block (except debug info).");
1315 // If this MBB is the only predecessor of a successor it is legal to copy
1316 // DBG_VALUE instructions to the beginning of the successor.
1317 for (MachineBasicBlock *SuccBB : MBB.successors())
1318 if (SuccBB->pred_size() == 1)
1319 copyDebugInfoToSuccessor(TII, MBB, *SuccBB);
1320 // If this MBB is the only successor of a predecessor it is legal to copy the
1321 // DBG_VALUE instructions to the end of the predecessor (just before the
1322 // terminators, assuming that the terminator isn't affecting the DBG_VALUE).
1323 for (MachineBasicBlock *PredBB : MBB.predecessors())
1324 if (PredBB->succ_size() == 1)
1325 copyDebugInfoToPredecessor(TII, MBB, *PredBB);
1326}
1327
1328bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
1329 bool MadeChange = false;
1330 MachineFunction &MF = *MBB->getParent();
1331ReoptimizeBlock:
1332
1333 MachineFunction::iterator FallThrough = MBB->getIterator();
1334 ++FallThrough;
1335
1336 // Make sure MBB and FallThrough belong to the same EH scope.
1337 bool SameEHScope = true;
1338 if (!EHScopeMembership.empty() && FallThrough != MF.end()) {
1339 auto MBBEHScope = EHScopeMembership.find(MBB);
1340 assert(MBBEHScope != EHScopeMembership.end());
1341 auto FallThroughEHScope = EHScopeMembership.find(&*FallThrough);
1342 assert(FallThroughEHScope != EHScopeMembership.end());
1343 SameEHScope = MBBEHScope->second == FallThroughEHScope->second;
1344 }
1345
1346 // Analyze the branch in the current block. As a side-effect, this may cause
1347 // the block to become empty.
1348 MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
1349 SmallVector<MachineOperand, 4> CurCond;
1350 bool CurUnAnalyzable =
1351 TII->analyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
1352
1353 // If this block is empty, make everyone use its fall-through, not the block
1354 // explicitly. Landing pads should not do this since the landing-pad table
1355 // points to this block. Blocks with their addresses taken shouldn't be
1356 // optimized away.
1357 if (IsEmptyBlock(MBB) && !MBB->isEHPad() && !MBB->hasAddressTaken() &&
1358 SameEHScope) {
1359 salvageDebugInfoFromEmptyBlock(TII, *MBB);
1360 // Dead block? Leave for cleanup later.
1361 if (MBB->pred_empty()) return MadeChange;
1362
1363 if (FallThrough == MF.end()) {
1364 // TODO: Simplify preds to not branch here if possible!
1365 } else if (FallThrough->isEHPad()) {
1366 // Don't rewrite to a landing pad fallthrough. That could lead to the case
1367 // where a BB jumps to more than one landing pad.
1368 // TODO: Is it ever worth rewriting predecessors which don't already
1369 // jump to a landing pad, and so can safely jump to the fallthrough?
1370 } else if (MBB->isSuccessor(&*FallThrough)) {
1371 // Rewrite all predecessors of the old block to go to the fallthrough
1372 // instead.
1373 while (!MBB->pred_empty()) {
1374 MachineBasicBlock *Pred = *(MBB->pred_end()-1);
1375 Pred->ReplaceUsesOfBlockWith(MBB, &*FallThrough);
1376 }
1377 // Add the remaining successors of MBB to the successors of FallThrough.
1378 // Those successors are not directly reachable via MBB, so they must be
1379 // landing pads.
1380 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
1381 if (*SI != &*FallThrough && !FallThrough->isSuccessor(*SI)) {
1382 assert((*SI)->isEHPad() && "Bad CFG");
1383 FallThrough->copySuccessor(MBB, SI);
1384 }
1385 // If MBB was the target of a jump table, update jump tables to go to the
1386 // fallthrough instead.
1387 if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1388 MJTI->ReplaceMBBInJumpTables(MBB, &*FallThrough);
1389 MadeChange = true;
1390 }
1391 return MadeChange;
1392 }
1393
1394 // Check to see if we can simplify the terminator of the block before this
1395 // one.
1396 MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
1397
1398 MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
1399 SmallVector<MachineOperand, 4> PriorCond;
1400 bool PriorUnAnalyzable =
1401 TII->analyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
1402 if (!PriorUnAnalyzable) {
1403 // If the previous branch is conditional and both conditions go to the same
1404 // destination, remove the branch, replacing it with an unconditional one or
1405 // a fall-through.
1406 if (PriorTBB && PriorTBB == PriorFBB) {
1407 DebugLoc dl = getBranchDebugLoc(PrevBB);
1408 TII->removeBranch(PrevBB);
1409 PriorCond.clear();
1410 if (PriorTBB != MBB)
1411 TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1412 MadeChange = true;
1413 ++NumBranchOpts;
1414 goto ReoptimizeBlock;
1415 }
1416
1417 // If the previous block unconditionally falls through to this block and
1418 // this block has no other predecessors, move the contents of this block
1419 // into the prior block. This doesn't usually happen when SimplifyCFG
1420 // has been used, but it can happen if tail merging splits a fall-through
1421 // predecessor of a block.
1422 // This has to check PrevBB->succ_size() because EH edges are ignored by
1423 // analyzeBranch.
1424 if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 &&
1425 PrevBB.succ_size() == 1 && PrevBB.isSuccessor(MBB) &&
1426 !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1427 LLVM_DEBUG(dbgs() << "\nMerging into block: " << PrevBB
1428 << "From MBB: " << *MBB);
1429 // Remove redundant DBG_VALUEs first.
1430 if (!PrevBB.empty()) {
1431 MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
1432 --PrevBBIter;
1433 MachineBasicBlock::iterator MBBIter = MBB->begin();
1434 // Check if DBG_VALUE at the end of PrevBB is identical to the
1435 // DBG_VALUE at the beginning of MBB.
1436 while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end()
1437 && PrevBBIter->isDebugInstr() && MBBIter->isDebugInstr()) {
1438 if (!MBBIter->isIdenticalTo(*PrevBBIter))
1439 break;
1440 MachineInstr &DuplicateDbg = *MBBIter;
1441 ++MBBIter; -- PrevBBIter;
1442 DuplicateDbg.eraseFromParent();
1443 }
1444 }
1445 PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end());
1446 PrevBB.removeSuccessor(PrevBB.succ_begin());
1447 assert(PrevBB.succ_empty());
1448 PrevBB.transferSuccessors(MBB);
1449 MadeChange = true;
1450 return MadeChange;
1451 }
1452
1453 // If the previous branch *only* branches to *this* block (conditional or
1454 // not) remove the branch.
1455 if (PriorTBB == MBB && !PriorFBB) {
1456 TII->removeBranch(PrevBB);
1457 MadeChange = true;
1458 ++NumBranchOpts;
1459 goto ReoptimizeBlock;
1460 }
1461
1462 // If the prior block branches somewhere else on the condition and here if
1463 // the condition is false, remove the uncond second branch.
1464 if (PriorFBB == MBB) {
1465 DebugLoc dl = getBranchDebugLoc(PrevBB);
1466 TII->removeBranch(PrevBB);
1467 TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1468 MadeChange = true;
1469 ++NumBranchOpts;
1470 goto ReoptimizeBlock;
1471 }
1472
1473 // If the prior block branches here on true and somewhere else on false, and
1474 // if the branch condition is reversible, reverse the branch to create a
1475 // fall-through.
1476 if (PriorTBB == MBB) {
1477 SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1478 if (!TII->reverseBranchCondition(NewPriorCond)) {
1479 DebugLoc dl = getBranchDebugLoc(PrevBB);
1480 TII->removeBranch(PrevBB);
1481 TII->insertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
1482 MadeChange = true;
1483 ++NumBranchOpts;
1484 goto ReoptimizeBlock;
1485 }
1486 }
1487
1488 // If this block has no successors (e.g. it is a return block or ends with
1489 // a call to a no-return function like abort or __cxa_throw) and if the pred
1490 // falls through into this block, and if it would otherwise fall through
1491 // into the block after this, move this block to the end of the function.
1492 //
1493 // We consider it more likely that execution will stay in the function (e.g.
1494 // due to loops) than it is to exit it. This asserts in loops etc, moving
1495 // the assert condition out of the loop body.
1496 if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
1497 MachineFunction::iterator(PriorTBB) == FallThrough &&
1498 !MBB->canFallThrough()) {
1499 bool DoTransform = true;
1500
1501 // We have to be careful that the succs of PredBB aren't both no-successor
1502 // blocks. If neither have successors and if PredBB is the second from
1503 // last block in the function, we'd just keep swapping the two blocks for
1504 // last. Only do the swap if one is clearly better to fall through than
1505 // the other.
1506 if (FallThrough == --MF.end() &&
1507 !IsBetterFallthrough(PriorTBB, MBB))
1508 DoTransform = false;
1509
1510 if (DoTransform) {
1511 // Reverse the branch so we will fall through on the previous true cond.
1512 SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1513 if (!TII->reverseBranchCondition(NewPriorCond)) {
1514 LLVM_DEBUG(dbgs() << "\nMoving MBB: " << *MBB
1515 << "To make fallthrough to: " << *PriorTBB << "\n");
1516
1517 DebugLoc dl = getBranchDebugLoc(PrevBB);
1518 TII->removeBranch(PrevBB);
1519 TII->insertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
1520
1521 // Move this block to the end of the function.
1522 MBB->moveAfter(&MF.back());
1523 MadeChange = true;
1524 ++NumBranchOpts;
1525 return MadeChange;
1526 }
1527 }
1528 }
1529 }
1530
1531 if (!IsEmptyBlock(MBB)) {
1532 MachineInstr &TailCall = *MBB->getFirstNonDebugInstr();
1533 if (TII->isUnconditionalTailCall(TailCall)) {
1534 SmallVector<MachineBasicBlock *> PredsChanged;
1535 for (auto &Pred : MBB->predecessors()) {
1536 MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1537 SmallVector<MachineOperand, 4> PredCond;
1538 bool PredAnalyzable =
1539 !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
1540
1541 // Only eliminate if MBB == TBB (Taken Basic Block)
1542 if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB &&
1543 PredTBB != PredFBB) {
1544 // The predecessor has a conditional branch to this block which
1545 // consists of only a tail call. Try to fold the tail call into the
1546 // conditional branch.
1547 if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
1548 // TODO: It would be nice if analyzeBranch() could provide a pointer
1549 // to the branch instruction so replaceBranchWithTailCall() doesn't
1550 // have to search for it.
1551 TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
1552 PredsChanged.push_back(Pred);
1553 }
1554 }
1555 // If the predecessor is falling through to this block, we could reverse
1556 // the branch condition and fold the tail call into that. However, after
1557 // that we might have to re-arrange the CFG to fall through to the other
1558 // block and there is a high risk of regressing code size rather than
1559 // improving it.
1560 }
1561 if (!PredsChanged.empty()) {
1562 NumTailCalls += PredsChanged.size();
1563 for (auto &Pred : PredsChanged)
1564 Pred->removeSuccessor(MBB);
1565
1566 return true;
1567 }
1568 }
1569 }
1570
1571 if (!CurUnAnalyzable) {
1572 // If this is a two-way branch, and the FBB branches to this block, reverse
1573 // the condition so the single-basic-block loop is faster. Instead of:
1574 // Loop: xxx; jcc Out; jmp Loop
1575 // we want:
1576 // Loop: xxx; jncc Loop; jmp Out
1577 if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
1578 SmallVector<MachineOperand, 4> NewCond(CurCond);
1579 if (!TII->reverseBranchCondition(NewCond)) {
1580 DebugLoc dl = getBranchDebugLoc(*MBB);
1581 TII->removeBranch(*MBB);
1582 TII->insertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
1583 MadeChange = true;
1584 ++NumBranchOpts;
1585 goto ReoptimizeBlock;
1586 }
1587 }
1588
1589 // If this branch is the only thing in its block, see if we can forward
1590 // other blocks across it.
1591 if (CurTBB && CurCond.empty() && !CurFBB &&
1592 IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
1593 !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1595 // This block may contain just an unconditional branch. Because there can
1596 // be 'non-branch terminators' in the block, try removing the branch and
1597 // then seeing if the block is empty.
1598 TII->removeBranch(*MBB);
1599 // If the only things remaining in the block are debug info, remove these
1600 // as well, so this will behave the same as an empty block in non-debug
1601 // mode.
1602 if (IsEmptyBlock(MBB)) {
1603 // Make the block empty, losing the debug info (we could probably
1604 // improve this in some cases.)
1605 MBB->erase(MBB->begin(), MBB->end());
1606 }
1607 // If this block is just an unconditional branch to CurTBB, we can
1608 // usually completely eliminate the block. The only case we cannot
1609 // completely eliminate the block is when the block before this one
1610 // falls through into MBB and we can't understand the prior block's branch
1611 // condition.
1612 if (MBB->empty()) {
1613 bool PredHasNoFallThrough = !PrevBB.canFallThrough();
1614 if (PredHasNoFallThrough || !PriorUnAnalyzable ||
1615 !PrevBB.isSuccessor(MBB)) {
1616 // If the prior block falls through into us, turn it into an
1617 // explicit branch to us to make updates simpler.
1618 if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
1619 PriorTBB != MBB && PriorFBB != MBB) {
1620 if (!PriorTBB) {
1621 assert(PriorCond.empty() && !PriorFBB &&
1622 "Bad branch analysis");
1623 PriorTBB = MBB;
1624 } else {
1625 assert(!PriorFBB && "Machine CFG out of date!");
1626 PriorFBB = MBB;
1627 }
1628 DebugLoc pdl = getBranchDebugLoc(PrevBB);
1629 TII->removeBranch(PrevBB);
1630 TII->insertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl);
1631 }
1632
1633 // Iterate through all the predecessors, revectoring each in turn.
1634 size_t PI = 0;
1635 bool DidChange = false;
1636 bool HasBranchToSelf = false;
1637 while(PI != MBB->pred_size()) {
1638 MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI);
1639 if (PMBB == MBB) {
1640 // If this block has an uncond branch to itself, leave it.
1641 ++PI;
1642 HasBranchToSelf = true;
1643 } else {
1644 DidChange = true;
1645 PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB);
1646 // Add the remaining successors of MBB to the successors of CurTBB. Those
1647 // successors are not directly reachable via MBB, so they should be
1648 // landing pads.
1649 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE;
1650 ++SI)
1651 if (*SI != CurTBB && !CurTBB->isSuccessor(*SI)) {
1652 assert((*SI)->isEHPad() && "Bad CFG");
1653 CurTBB->copySuccessor(MBB, SI);
1654 }
1655 // If this change resulted in PMBB ending in a conditional
1656 // branch where both conditions go to the same destination,
1657 // change this to an unconditional branch.
1658 MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
1659 SmallVector<MachineOperand, 4> NewCurCond;
1660 bool NewCurUnAnalyzable = TII->analyzeBranch(
1661 *PMBB, NewCurTBB, NewCurFBB, NewCurCond, true);
1662 if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
1663 DebugLoc pdl = getBranchDebugLoc(*PMBB);
1664 TII->removeBranch(*PMBB);
1665 NewCurCond.clear();
1666 TII->insertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
1667 MadeChange = true;
1668 ++NumBranchOpts;
1669 }
1670 }
1671 }
1672
1673 // Change any jumptables to go to the new MBB.
1674 if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1675 MJTI->ReplaceMBBInJumpTables(MBB, CurTBB);
1676 if (DidChange) {
1677 ++NumBranchOpts;
1678 MadeChange = true;
1679 if (!HasBranchToSelf) return MadeChange;
1680 }
1681 }
1682 }
1683
1684 // Add the branch back if the block is more than just an uncond branch.
1685 TII->insertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
1686 }
1687 }
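// Illustrative example, not from this file: the block-forwarding case handled
// above, in pseudo-MIR (names are hypothetical). If %bb.5 consists solely of
//   JMP %bb.9
// every predecessor's branch operands referring to %bb.5 are rewritten to
// %bb.9 (ReplaceUsesOfBlockWith), jump tables are updated, and any EH-pad
// successors of %bb.5 are copied over to %bb.9. %bb.5 is then left without
// predecessors and is later removed as a dead block; only a block that
// branches to itself, or one kept alive by an unanalyzable fall-through
// predecessor, gets its unconditional branch reinserted instead.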
1688
1689 // If the prior block doesn't fall through into this block, and if this
1690 // block doesn't fall through into some other block, see if we can find a
1691 // place to move this block where a fall-through will happen.
1692 if (!PrevBB.canFallThrough()) {
1693 // Now we know that there was no fall-through into this block, check to
1694 // see if it has a fall-through into its successor.
1695 bool CurFallsThru = MBB->canFallThrough();
1696
1697 if (!MBB->isEHPad()) {
1698 // Check all the predecessors of this block. If one of them has no fall
1699 // throughs, and analyzeBranch thinks it _could_ fallthrough to this
1700 // block, move this block right after it.
1701 for (MachineBasicBlock *PredBB : MBB->predecessors()) {
1702 // Analyze the branch at the end of the pred.
1703 MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1704 SmallVector<MachineOperand, 4> PredCond;
1705 if (PredBB != MBB && !PredBB->canFallThrough() &&
1706 !TII->analyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true) &&
1707 (PredTBB == MBB || PredFBB == MBB) &&
1708 (!CurFallsThru || !CurTBB || !CurFBB) &&
1709 (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) {
1710 // If the current block doesn't fall through, just move it.
1711 // If the current block can fall through and does not end with a
1712 // conditional branch, we need to append an unconditional jump to
1713 // the (current) next block. To avoid a possible compile-time
1714 // infinite loop, move blocks only backward in this case.
1715 // Also, if there are already 2 branches here, we cannot add a third;
1716 // this means we have the case
1717 // Bcc next
1718 // B elsewhere
1719 // next:
1720 if (CurFallsThru) {
1721 MachineBasicBlock *NextBB = &*std::next(MBB->getIterator());
1722 CurCond.clear();
1723 TII->insertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
1724 }
1725 MBB->moveAfter(PredBB);
1726 MadeChange = true;
1727 goto ReoptimizeBlock;
1728 }
1729 }
1730 }
1731
1732 if (!CurFallsThru) {
1733 // Check analyzable branch-successors to see if we can move this block
1734 // before one.
1735 if (!CurUnAnalyzable) {
1736 for (MachineBasicBlock *SuccBB : {CurFBB, CurTBB}) {
1737 if (!SuccBB)
1738 continue;
1739 // Analyze the branch at the end of the block before the succ.
1740 MachineFunction::iterator SuccPrev = --SuccBB->getIterator();
1741
1742 // If this block doesn't already fall-through to that successor, and
1743 // if the succ doesn't already have a block that can fall through into
1744 // it, we can arrange for the fallthrough to happen.
1745 if (SuccBB != MBB && &*SuccPrev != MBB &&
1746 !SuccPrev->canFallThrough()) {
1747 MBB->moveBefore(SuccBB);
1748 MadeChange = true;
1749 goto ReoptimizeBlock;
1750 }
1751 }
1752 }
1753
1754 // Okay, there is no really great place to put this block. If, however,
1755 // the block before this one would be a fall-through if this block were
1756 // removed, move this block to the end of the function. There is no real
1757 // advantage in "falling through" to an EH block, so we don't want to
1758 // perform this transformation for that case.
1759 //
1760 // Also, Windows EH introduced the possibility of an arbitrary number of
1761 // successors to a given block. The analyzeBranch call does not consider
1762 // exception handling and so we can get in a state where a block
1763 // containing a call is followed by multiple EH blocks that would be
1764 // rotated infinitely at the end of the function if the transformation
1765 // below were performed for EH "FallThrough" blocks. Therefore, even if
1766 // that appears not to be happening anymore, we should assume that it is
1767 // possible and not remove the "!FallThrough->isEHPad()" condition below.
1768 MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
1769 SmallVector<MachineOperand, 4> PrevCond;
1770 if (FallThrough != MF.end() &&
1771 !FallThrough->isEHPad() &&
1772 !TII->analyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
1773 PrevBB.isSuccessor(&*FallThrough)) {
1774 MBB->moveAfter(&MF.back());
1775 MadeChange = true;
1776 return MadeChange;
1777 }
1778 }
1779 }
1780
1781 return MadeChange;
1782}
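// Illustrative sketch, not part of BranchFolding.cpp: the
// analyzeBranch/removeBranch/insertBranch protocol that OptimizeBlock leans on
// throughout, written as a minimal standalone helper. The helper itself is
// hypothetical; it only retargets the taken edge of a block whose terminator
// the target's TargetInstrInfo can analyze.
static bool retargetAnalyzableBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *NewTaken,
                                     const TargetInstrInfo *TII) {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false) || !TBB)
    return false; // No analyzable branch to retarget; leave the block alone.
  DebugLoc DL = MBB.findBranchDebugLoc();
  TII->removeBranch(MBB);
  // Re-emit the same kind of branch (conditional or not) with the new taken
  // target. The caller is still responsible for keeping the CFG in sync,
  // e.g. via MBB.replaceSuccessor(TBB, NewTaken).
  TII->insertBranch(MBB, NewTaken, FBB, Cond, DL);
  return true;
}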
1783
1784//===----------------------------------------------------------------------===//
1785// Hoist Common Code
1786//===----------------------------------------------------------------------===//
1787
1788bool BranchFolder::HoistCommonCode(MachineFunction &MF) {
1789 bool MadeChange = false;
1790 for (MachineBasicBlock &MBB : llvm::make_early_inc_range(MF))
1791 MadeChange |= HoistCommonCodeInSuccs(&MBB);
1792
1793 return MadeChange;
1794}
1795
1796/// findFalseBlock - BB has a fallthrough. Find its 'false' successor given
1797/// its 'true' successor.
1798 static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
1799 MachineBasicBlock *TrueBB) {
1800 for (MachineBasicBlock *SuccBB : BB->successors())
1801 if (SuccBB != TrueBB)
1802 return SuccBB;
1803 return nullptr;
1804}
1805
1806template <class Container>
1807 static void addRegAndItsAliases(Register Reg, const TargetRegisterInfo *TRI,
1808 Container &Set) {
1809 if (Reg.isPhysical()) {
1810 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1811 Set.insert(*AI);
1812 } else {
1813 Set.insert(Reg);
1814 }
1815}
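// Illustrative sketch, not part of BranchFolding.cpp: a hypothetical helper
// showing how a set filled by addRegAndItsAliases is meant to be queried.
// Because every alias of a physical register is inserted, an overlap check on
// any sub- or super-register (e.g. AX vs. EAX on x86) sees the interference;
// for virtual registers only the register itself is recorded.
static bool defWouldClobber(Register DefReg, Register QueryReg,
                            const TargetRegisterInfo *TRI) {
  SmallSet<Register, 4> Defs;
  addRegAndItsAliases(DefReg, TRI, Defs);
  return Defs.count(QueryReg);
}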
1816
1817/// findHoistingInsertPosAndDeps - Find the location to move common instructions
1818/// in successors to. The location is usually just before the terminator,
1819/// however if the terminator is a conditional branch and its previous
1820/// instruction is the flag setting instruction, the previous instruction is
1821/// the preferred location. This function also gathers uses and defs of the
1822/// instructions from the insertion point to the end of the block. The data is
1823/// used by HoistCommonCodeInSuccs to ensure safety.
1824static
1825 MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
1826 const TargetInstrInfo *TII,
1827 const TargetRegisterInfo *TRI,
1828 SmallSet<Register, 4> &Uses,
1829 SmallSet<Register, 4> &Defs) {
1830 MachineBasicBlock::iterator Loc = MBB->getFirstTerminator();
1831 if (!TII->isUnpredicatedTerminator(*Loc))
1832 return MBB->end();
1833
1834 for (const MachineOperand &MO : Loc->operands()) {
1835 if (!MO.isReg())
1836 continue;
1837 Register Reg = MO.getReg();
1838 if (!Reg)
1839 continue;
1840 if (MO.isUse()) {
1841 addRegAndItsAliases(Reg, TRI, Uses);
1842 } else {
1843 if (!MO.isDead())
1844 // Don't try to hoist code in the rare case the terminator defines a
1845 // register that is later used.
1846 return MBB->end();
1847
1848 // If the terminator defines a register, make sure we don't hoist
1849 // the instruction whose def might be clobbered by the terminator.
1850 addRegAndItsAliases(Reg, TRI, Defs);
1851 }
1852 }
1853
1854 if (Uses.empty())
1855 return Loc;
1856 // If the terminator is the only instruction in the block and Uses is not
1857 // empty (or we would have returned above), we can still safely hoist
1858 // instructions just before the terminator as long as the Defs/Uses are not
1859 // violated (which is checked in HoistCommonCodeInSuccs).
1860 if (Loc == MBB->begin())
1861 return Loc;
1862
1863 // The terminator is probably a conditional branch, try not to separate the
1864 // branch from condition setting instruction.
1865 MachineBasicBlock::iterator PI = prev_nodbg(Loc, MBB->begin());
1866
1867 bool IsDef = false;
1868 for (const MachineOperand &MO : PI->operands()) {
1869 // If PI has a regmask operand, it is probably a call. Separate away.
1870 if (MO.isRegMask())
1871 return Loc;
1872 if (!MO.isReg() || MO.isUse())
1873 continue;
1874 Register Reg = MO.getReg();
1875 if (!Reg)
1876 continue;
1877 if (Uses.count(Reg)) {
1878 IsDef = true;
1879 break;
1880 }
1881 }
1882 if (!IsDef)
1883 // The condition setting instruction is not just before the conditional
1884 // branch.
1885 return Loc;
1886
1887 // Be conservative: don't insert an instruction above something that may have
1888 // side-effects. And since it's potentially bad to separate the flag-setting
1889 // instruction from the conditional branch, just abort the optimization
1890 // completely.
1891 // Also avoid moving code above a predicated instruction, since it's hard to
1892 // reason about register liveness with predicated instructions.
1893 bool DontMoveAcrossStore = true;
1894 if (!PI->isSafeToMove(nullptr, DontMoveAcrossStore) || TII->isPredicated(*PI))
1895 return MBB->end();
1896
1897 // Find out what registers are live. Note this routine is ignoring other live
1898 // registers which are only used by instructions in successor blocks.
1899 for (const MachineOperand &MO : PI->operands()) {
1900 if (!MO.isReg())
1901 continue;
1902 Register Reg = MO.getReg();
1903 if (!Reg)
1904 continue;
1905 if (MO.isUse()) {
1906 addRegAndItsAliases(Reg, TRI, Uses);
1907 } else {
1908 if (Uses.erase(Reg)) {
1909 if (Reg.isPhysical()) {
1910 for (MCPhysReg SubReg : TRI->subregs(Reg))
1911 Uses.erase(SubReg); // Use sub-registers to be conservative
1912 }
1913 }
1914 addRegAndItsAliases(Reg, TRI, Defs);
1915 }
1916 }
1917
1918 return PI;
1919}
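// Illustrative example, not from this file: for a block whose terminator
// sequence looks like (pseudo-MIR, register and opcode names are hypothetical)
//   $eflags = CMPri $r0, 0
//   JCC %bb.then, $eflags
// the position returned above is the CMPri rather than the JCC, so that
// hoisted instructions are never placed between the flag-setting compare and
// the conditional branch that reads it. On return, Uses holds $r0 (read by
// the compare) and Defs holds $eflags and its aliases, which is what
// HoistCommonCodeInSuccs checks against before moving anything.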
1920
1921bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
1922 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1923 SmallVector<MachineOperand, 4> Cond;
1924 if (TII->analyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
1925 return false;
1926
1927 if (!FBB) FBB = findFalseBlock(MBB, TBB);
1928 if (!FBB)
1929 // Malformed bcc? True and false blocks are the same?
1930 return false;
1931
1932 // Restrict the optimization to cases where MBB is the only predecessor;
1933 // there it is an obvious win.
1934 if (TBB->pred_size() > 1 || FBB->pred_size() > 1)
1935 return false;
1936
1937 // Find a suitable position to hoist the common instructions to. Also figure
1938 // out which registers are used or defined by instructions from the insertion
1939 // point to the end of the block.
1940 SmallSet<Register, 4> Uses, Defs;
1941 MachineBasicBlock::iterator Loc =
1942 findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs);
1943 if (Loc == MBB->end())
1944 return false;
1945
1946 bool HasDups = false;
1947 SmallSet<Register, 4> ActiveDefsSet, AllDefsSet;
1948 MachineBasicBlock::iterator TIB = TBB->begin();
1949 MachineBasicBlock::iterator FIB = FBB->begin();
1950 MachineBasicBlock::iterator TIE = TBB->end();
1951 MachineBasicBlock::iterator FIE = FBB->end();
1952 while (TIB != TIE && FIB != FIE) {
1953 // Skip dbg_value instructions. These do not count.
1954 TIB = skipDebugInstructionsForward(TIB, TIE, false);
1955 FIB = skipDebugInstructionsForward(FIB, FIE, false);
1956 if (TIB == TIE || FIB == FIE)
1957 break;
1958
1959 if (!TIB->isIdenticalTo(*FIB, MachineInstr::CheckKillDead))
1960 break;
1961
1962 if (TII->isPredicated(*TIB))
1963 // Hard to reason about register liveness with predicated instructions.
1964 break;
1965
1966 bool IsSafe = true;
1967 for (MachineOperand &MO : TIB->operands()) {
1968 // Don't attempt to hoist instructions with register masks.
1969 if (MO.isRegMask()) {
1970 IsSafe = false;
1971 break;
1972 }
1973 if (!MO.isReg())
1974 continue;
1975 Register Reg = MO.getReg();
1976 if (!Reg)
1977 continue;
1978 if (MO.isDef()) {
1979 if (Uses.count(Reg)) {
1980 // Avoid clobbering a register that's used by the instruction at
1981 // the point of insertion.
1982 IsSafe = false;
1983 break;
1984 }
1985
1986 if (Defs.count(Reg) && !MO.isDead()) {
1987 // Don't hoist the instruction if the def would be clobbered by the
1988 // instruction at the point of insertion. FIXME: This is overly
1989 // conservative. It should be possible to hoist the instructions
1990 // in BB2 in the following example:
1991 // BB1:
1992 // r1, eflag = op1 r2, r3
1993 // brcc eflag
1994 //
1995 // BB2:
1996 // r1 = op2, ...
1997 // = op3, killed r1
1998 IsSafe = false;
1999 break;
2000 }
2001 } else if (!ActiveDefsSet.count(Reg)) {
2002 if (Defs.count(Reg)) {
2003 // Use is defined by the instruction at the point of insertion.
2004 IsSafe = false;
2005 break;
2006 }
2007
2008 if (MO.isKill() && Uses.count(Reg))
2009 // Kills a register that's read by the instruction at the point of
2010 // insertion. Remove the kill marker.
2011 MO.setIsKill(false);
2012 }
2013 }
2014 if (!IsSafe)
2015 break;
2016
2017 bool DontMoveAcrossStore = true;
2018 if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
2019 break;
2020
2021 // Remove kills from ActiveDefsSet, these registers had short live ranges.
2022 for (const MachineOperand &MO : TIB->all_uses()) {
2023 if (!MO.isKill())
2024 continue;
2025 Register Reg = MO.getReg();
2026 if (!Reg)
2027 continue;
2028 if (!AllDefsSet.count(Reg)) {
2029 continue;
2030 }
2031 if (Reg.isPhysical()) {
2032 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
2033 ActiveDefsSet.erase(*AI);
2034 } else {
2035 ActiveDefsSet.erase(Reg);
2036 }
2037 }
2038
2039 // Track local defs so we can update liveins.
2040 for (const MachineOperand &MO : TIB->all_defs()) {
2041 if (MO.isDead())
2042 continue;
2043 Register Reg = MO.getReg();
2044 if (!Reg || Reg.isVirtual())
2045 continue;
2046 addRegAndItsAliases(Reg, TRI, ActiveDefsSet);
2047 addRegAndItsAliases(Reg, TRI, AllDefsSet);
2048 }
2049
2050 HasDups = true;
2051 ++TIB;
2052 ++FIB;
2053 }
2054
2055 if (!HasDups)
2056 return false;
2057
2058 MBB->splice(Loc, TBB, TBB->begin(), TIB);
2059 FBB->erase(FBB->begin(), FIB);
2060
2061 if (UpdateLiveIns)
2062 fullyRecomputeLiveIns({TBB, FBB});
2063
2064 ++NumHoist;
2065 return true;
2066}
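// Illustrative example, not from this file: the overall effect of
// HoistCommonCodeInSuccs in pseudo-MIR (names are hypothetical). Given a
// diamond whose two sides start with the same instruction,
//   bb.0:                  bb.1:                bb.2:
//     JCC %bb.1, <cond>      $r1 = LOAD $r2       $r1 = LOAD $r2
//     JMP %bb.2              ...                  ...
// the common LOAD is spliced from bb.1 into bb.0 at the position chosen by
// findHoistingInsertPosAndDeps, the duplicate in bb.2 is erased, and, when
// live-ins are being tracked, the live-in lists of bb.1 and bb.2 are
// recomputed.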