LLVM 21.0.0git
RegAllocFast.cpp
Go to the documentation of this file.
1//===- RegAllocFast.cpp - A fast register allocator for debug code --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This register allocator allocates registers to a basic block at a
10/// time, attempting to keep values in registers and reusing registers as
11/// appropriate.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/IndexedMap.h"
19#include "llvm/ADT/MapVector.h"
20#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SparseSet.h"
23#include "llvm/ADT/Statistic.h"
41#include "llvm/Pass.h"
42#include "llvm/Support/Debug.h"
45#include <cassert>
46#include <tuple>
47#include <vector>
48
49using namespace llvm;
50
51#define DEBUG_TYPE "regalloc"
52
53STATISTIC(NumStores, "Number of stores added");
54STATISTIC(NumLoads, "Number of loads added");
55STATISTIC(NumCoalesced, "Number of copies coalesced");
56
57// FIXME: Remove this switch when all testcases are fixed!
58static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
60
61static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator",
63
64namespace {
65
66/// Assign ascending index for instructions in machine basic block. The index
67/// can be used to determine dominance between instructions in same MBB.
68class InstrPosIndexes {
69public:
70 void unsetInitialized() { IsInitialized = false; }
71
72 void init(const MachineBasicBlock &MBB) {
73 CurMBB = &MBB;
74 Instr2PosIndex.clear();
75 uint64_t LastIndex = 0;
76 for (const MachineInstr &MI : MBB) {
77 LastIndex += InstrDist;
78 Instr2PosIndex[&MI] = LastIndex;
79 }
80 }
81
82 /// Set \p Index to index of \p MI. If \p MI is new inserted, it try to assign
83 /// index without affecting existing instruction's index. Return true if all
84 /// instructions index has been reassigned.
85 bool getIndex(const MachineInstr &MI, uint64_t &Index) {
86 if (!IsInitialized) {
87 init(*MI.getParent());
88 IsInitialized = true;
89 Index = Instr2PosIndex.at(&MI);
90 return true;
91 }
92
93 assert(MI.getParent() == CurMBB && "MI is not in CurMBB");
94 auto It = Instr2PosIndex.find(&MI);
95 if (It != Instr2PosIndex.end()) {
96 Index = It->second;
97 return false;
98 }
99
100 // Distance is the number of consecutive unassigned instructions including
101 // MI. Start is the first instruction of them. End is the next of last
102 // instruction of them.
103 // e.g.
104 // |Instruction| A | B | C | MI | D | E |
105 // | Index | 1024 | | | | | 2048 |
106 //
107 // In this case, B, C, MI, D are unassigned. Distance is 4, Start is B, End
108 // is E.
109 unsigned Distance = 1;
110 MachineBasicBlock::const_iterator Start = MI.getIterator(),
111 End = std::next(Start);
112 while (Start != CurMBB->begin() &&
113 !Instr2PosIndex.count(&*std::prev(Start))) {
114 --Start;
115 ++Distance;
116 }
117 while (End != CurMBB->end() && !Instr2PosIndex.count(&*(End))) {
118 ++End;
119 ++Distance;
120 }
121
122 // LastIndex is initialized to last used index prior to MI or zero.
123 // In previous example, LastIndex is 1024, EndIndex is 2048;
124 uint64_t LastIndex =
125 Start == CurMBB->begin() ? 0 : Instr2PosIndex.at(&*std::prev(Start));
126 uint64_t Step;
127 if (End == CurMBB->end())
128 Step = static_cast<uint64_t>(InstrDist);
129 else {
130 // No instruction uses index zero.
131 uint64_t EndIndex = Instr2PosIndex.at(&*End);
132 assert(EndIndex > LastIndex && "Index must be ascending order");
133 unsigned NumAvailableIndexes = EndIndex - LastIndex - 1;
134 // We want index gap between two adjacent MI is as same as possible. Given
135 // total A available indexes, D is number of consecutive unassigned
136 // instructions, S is the step.
137 // |<- S-1 -> MI <- S-1 -> MI <- A-S*D ->|
138 // There're S-1 available indexes between unassigned instruction and its
139 // predecessor. There're A-S*D available indexes between the last
140 // unassigned instruction and its successor.
141 // Ideally, we want
142 // S-1 = A-S*D
143 // then
144 // S = (A+1)/(D+1)
145 // An valid S must be integer greater than zero, so
146 // S <= (A+1)/(D+1)
147 // =>
148 // A-S*D >= 0
149 // That means we can safely use (A+1)/(D+1) as step.
150 // In previous example, Step is 204, Index of B, C, MI, D is 1228, 1432,
151 // 1636, 1840.
152 Step = (NumAvailableIndexes + 1) / (Distance + 1);
153 }
154
155 // Reassign index for all instructions if number of new inserted
156 // instructions exceed slot or all instructions are new.
157 if (LLVM_UNLIKELY(!Step || (!LastIndex && Step == InstrDist))) {
158 init(*CurMBB);
159 Index = Instr2PosIndex.at(&MI);
160 return true;
161 }
162
163 for (auto I = Start; I != End; ++I) {
164 LastIndex += Step;
165 Instr2PosIndex[&*I] = LastIndex;
166 }
167 Index = Instr2PosIndex.at(&MI);
168 return false;
169 }
170
171private:
172 bool IsInitialized = false;
173 enum { InstrDist = 1024 };
174 const MachineBasicBlock *CurMBB = nullptr;
176};
177
178class RegAllocFastImpl {
179public:
180 RegAllocFastImpl(const RegAllocFilterFunc F = nullptr,
181 bool ClearVirtRegs_ = true)
182 : ShouldAllocateRegisterImpl(F), StackSlotForVirtReg(-1),
183 ClearVirtRegs(ClearVirtRegs_) {}
184
185private:
186 MachineFrameInfo *MFI = nullptr;
187 MachineRegisterInfo *MRI = nullptr;
188 const TargetRegisterInfo *TRI = nullptr;
189 const TargetInstrInfo *TII = nullptr;
190 RegisterClassInfo RegClassInfo;
191 const RegAllocFilterFunc ShouldAllocateRegisterImpl;
192
193 /// Basic block currently being allocated.
194 MachineBasicBlock *MBB = nullptr;
195
196 /// Maps virtual regs to the frame index where these values are spilled.
197 IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
198
199 /// Everything we know about a live virtual register.
200 struct LiveReg {
201 MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
202 Register VirtReg; ///< Virtual register number.
203 MCPhysReg PhysReg = 0; ///< Currently held here.
204 bool LiveOut = false; ///< Register is possibly live out.
205 bool Reloaded = false; ///< Register was reloaded.
206 bool Error = false; ///< Could not allocate.
207
208 explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
209
210 unsigned getSparseSetIndex() const { return VirtReg.virtRegIndex(); }
211 };
212
214 /// This map contains entries for each virtual register that is currently
215 /// available in a physical register.
216 LiveRegMap LiveVirtRegs;
217
218 /// Stores assigned virtual registers present in the bundle MI.
219 DenseMap<Register, MCPhysReg> BundleVirtRegsMap;
220
222 /// List of DBG_VALUE that we encountered without the vreg being assigned
223 /// because they were placed after the last use of the vreg.
225
226 /// Has a bit set for every virtual register for which it was determined
227 /// that it is alive across blocks.
228 BitVector MayLiveAcrossBlocks;
229
230 /// State of a register unit.
231 enum RegUnitState {
232 /// A free register is not currently in use and can be allocated
233 /// immediately without checking aliases.
234 regFree,
235
236 /// A pre-assigned register has been assigned before register allocation
237 /// (e.g., setting up a call parameter).
238 regPreAssigned,
239
240 /// Used temporarily in reloadAtBegin() to mark register units that are
241 /// live-in to the basic block.
242 regLiveIn,
243
244 /// A register state may also be a virtual register number, indication
245 /// that the physical register is currently allocated to a virtual
246 /// register. In that case, LiveVirtRegs contains the inverse mapping.
247 };
248
249 /// Maps each physical register to a RegUnitState enum or virtual register.
250 std::vector<unsigned> RegUnitStates;
251
253
254 /// Track register units that are used in the current instruction, and so
255 /// cannot be allocated.
256 ///
257 /// In the first phase (tied defs/early clobber), we consider also physical
258 /// uses, afterwards, we don't. If the lowest bit isn't set, it's a solely
259 /// physical use (markPhysRegUsedInInstr), otherwise, it's a normal use. To
260 /// avoid resetting the entire vector after every instruction, we track the
261 /// instruction "generation" in the remaining 31 bits -- this means, that if
262 /// UsedInInstr[Idx] < InstrGen, the register unit is unused. InstrGen is
263 /// never zero and always incremented by two.
264 ///
265 /// Don't allocate inline storage: the number of register units is typically
266 /// quite large (e.g., AArch64 > 100, X86 > 200, AMDGPU > 1000).
267 uint32_t InstrGen;
268 SmallVector<unsigned, 0> UsedInInstr;
269
270 SmallVector<unsigned, 8> DefOperandIndexes;
271 // Register masks attached to the current instruction.
273
274 // Assign index for each instruction to quickly determine dominance.
275 InstrPosIndexes PosIndexes;
276
277 void setPhysRegState(MCRegister PhysReg, unsigned NewState);
278 bool isPhysRegFree(MCPhysReg PhysReg) const;
279
280 /// Mark a physreg as used in this instruction.
281 void markRegUsedInInstr(MCPhysReg PhysReg) {
282 for (MCRegUnit Unit : TRI->regunits(PhysReg))
283 UsedInInstr[Unit] = InstrGen | 1;
284 }
285
286 // Check if physreg is clobbered by instruction's regmask(s).
287 bool isClobberedByRegMasks(MCPhysReg PhysReg) const {
288 return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
289 return MachineOperand::clobbersPhysReg(Mask, PhysReg);
290 });
291 }
292
293 /// Check if a physreg or any of its aliases are used in this instruction.
294 bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
295 if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
296 return true;
297 for (MCRegUnit Unit : TRI->regunits(PhysReg))
298 if (UsedInInstr[Unit] >= (InstrGen | !LookAtPhysRegUses))
299 return true;
300 return false;
301 }
302
303 /// Mark physical register as being used in a register use operand.
304 /// This is only used by the special livethrough handling code.
305 void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
306 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
307 assert(UsedInInstr[Unit] <= InstrGen && "non-phys use before phys use?");
308 UsedInInstr[Unit] = InstrGen;
309 }
310 }
311
312 /// Remove mark of physical register being used in the instruction.
313 void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
314 for (MCRegUnit Unit : TRI->regunits(PhysReg))
315 UsedInInstr[Unit] = 0;
316 }
317
318 enum : unsigned {
319 spillClean = 50,
320 spillDirty = 100,
321 spillPrefBonus = 20,
322 spillImpossible = ~0u
323 };
324
325public:
326 bool ClearVirtRegs;
327
328 bool runOnMachineFunction(MachineFunction &MF);
329
330private:
331 void allocateBasicBlock(MachineBasicBlock &MBB);
332
333 void addRegClassDefCounts(MutableArrayRef<unsigned> RegClassDefCounts,
334 Register Reg) const;
335
336 void findAndSortDefOperandIndexes(const MachineInstr &MI);
337
338 void allocateInstruction(MachineInstr &MI);
339 void handleDebugValue(MachineInstr &MI);
340 void handleBundle(MachineInstr &MI);
341
342 bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
343 bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
344 bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
345 void freePhysReg(MCPhysReg PhysReg);
346
347 unsigned calcSpillCost(MCPhysReg PhysReg) const;
348
349 LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
350 return LiveVirtRegs.find(VirtReg.virtRegIndex());
351 }
352
353 LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
354 return LiveVirtRegs.find(VirtReg.virtRegIndex());
355 }
356
357 void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
358 void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
359 bool LookAtPhysRegUses = false);
360 void allocVirtRegUndef(MachineOperand &MO);
361 void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
362 MCPhysReg Reg);
363 bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
364 Register VirtReg);
365 bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
366 bool LookAtPhysRegUses = false);
367 bool useVirtReg(MachineInstr &MI, MachineOperand &MO, Register VirtReg);
368
369 MCPhysReg getErrorAssignment(const LiveReg &LR, MachineInstr &MI,
370 const TargetRegisterClass &RC);
371
373 getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
374 SmallSet<Register, 2> &PrologLiveIns) const;
375
376 void reloadAtBegin(MachineBasicBlock &MBB);
377 bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
378
379 Register traceCopies(Register VirtReg) const;
380 Register traceCopyChain(Register Reg) const;
381
382 bool shouldAllocateRegister(const Register Reg) const;
383 int getStackSpaceFor(Register VirtReg);
384 void spill(MachineBasicBlock::iterator Before, Register VirtReg,
385 MCPhysReg AssignedReg, bool Kill, bool LiveOut);
386 void reload(MachineBasicBlock::iterator Before, Register VirtReg,
387 MCPhysReg PhysReg);
388
389 bool mayLiveOut(Register VirtReg);
390 bool mayLiveIn(Register VirtReg);
391
392 void dumpState() const;
393};
394
395class RegAllocFast : public MachineFunctionPass {
396 RegAllocFastImpl Impl;
397
398public:
399 static char ID;
400
401 RegAllocFast(const RegAllocFilterFunc F = nullptr, bool ClearVirtRegs_ = true)
402 : MachineFunctionPass(ID), Impl(F, ClearVirtRegs_) {}
403
404 bool runOnMachineFunction(MachineFunction &MF) override {
405 return Impl.runOnMachineFunction(MF);
406 }
407
408 StringRef getPassName() const override { return "Fast Register Allocator"; }
409
410 void getAnalysisUsage(AnalysisUsage &AU) const override {
411 AU.setPreservesCFG();
413 }
414
417 MachineFunctionProperties::Property::NoPHIs);
418 }
419
421 if (Impl.ClearVirtRegs) {
423 MachineFunctionProperties::Property::NoVRegs);
424 }
425
427 }
428
431 MachineFunctionProperties::Property::IsSSA);
432 }
433};
434
435} // end anonymous namespace
436
// Pass identification: the address of ID uniquely identifies the pass to the
// legacy PassManager.
char RegAllocFast::ID = 0;

// Register the pass so it can be requested by name ("regallocfast") on the
// command line and from pass pipelines.
INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
                false)
441
442bool RegAllocFastImpl::shouldAllocateRegister(const Register Reg) const {
443 assert(Reg.isVirtual());
444 if (!ShouldAllocateRegisterImpl)
445 return true;
446
447 return ShouldAllocateRegisterImpl(*TRI, *MRI, Reg);
448}
449
450void RegAllocFastImpl::setPhysRegState(MCRegister PhysReg, unsigned NewState) {
451 for (MCRegUnit Unit : TRI->regunits(PhysReg))
452 RegUnitStates[Unit] = NewState;
453}
454
455bool RegAllocFastImpl::isPhysRegFree(MCPhysReg PhysReg) const {
456 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
457 if (RegUnitStates[Unit] != regFree)
458 return false;
459 }
460 return true;
461}
462
463/// This allocates space for the specified virtual register to be held on the
464/// stack.
465int RegAllocFastImpl::getStackSpaceFor(Register VirtReg) {
466 // Find the location Reg would belong...
467 int SS = StackSlotForVirtReg[VirtReg];
468 // Already has space allocated?
469 if (SS != -1)
470 return SS;
471
472 // Allocate a new stack object for this spill location...
473 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
474 unsigned Size = TRI->getSpillSize(RC);
475 Align Alignment = TRI->getSpillAlign(RC);
476 int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
477
478 // Assign the slot.
479 StackSlotForVirtReg[VirtReg] = FrameIdx;
480 return FrameIdx;
481}
482
/// Return true if \p A is positioned before \p B in their common basic block,
/// using the cached position indexes.
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A,
                      const MachineInstr &B) {
  uint64_t IndexA, IndexB;
  PosIndexes.getIndex(A, IndexA);
  // getIndex() returns true when it had to renumber the entire block; in that
  // case the index fetched for A above is stale and must be re-queried.
  if (LLVM_UNLIKELY(PosIndexes.getIndex(B, IndexB)))
    PosIndexes.getIndex(A, IndexA);
  return IndexA < IndexB;
}
491
492/// Returns false if \p VirtReg is known to not live out of the current block.
/// Returns false if \p VirtReg is known to not live out of the current block.
bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
  // Cached result from a previous query: the vreg was already determined to
  // live across blocks.
  if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex())) {
    // Cannot be live-out if there are no successors.
    return !MBB->succ_empty();
  }

  const MachineInstr *SelfLoopDef = nullptr;

  // If this block loops back to itself, it is necessary to check whether the
  // use comes after the def.
  if (MBB->isSuccessor(MBB)) {
    // Find the first def in the self loop MBB.
    for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
      if (DefInst.getParent() != MBB) {
        // Defined in some other block too: conservatively live across blocks.
        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
        return true;
      } else {
        // Track the earliest def within this block.
        if (!SelfLoopDef || dominates(PosIndexes, DefInst, *SelfLoopDef))
          SelfLoopDef = &DefInst;
      }
    }
    if (!SelfLoopDef) {
      // No def in this block at all: the value must come from outside, so it
      // is live around the back-edge.
      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
      return true;
    }
  }

  // See if the first \p Limit uses of the register are all in the current
  // block.
  static const unsigned Limit = 8;
  unsigned C = 0;
  for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
    // A use outside this block, or too many uses to check cheaply: give up
    // and mark the vreg live across blocks.
    if (UseInst.getParent() != MBB || ++C >= Limit) {
      MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
      // Cannot be live-out if there are no successors.
      return !MBB->succ_empty();
    }

    if (SelfLoopDef) {
      // Try to handle some simple cases to avoid spilling and reloading every
      // value inside a self looping block.
      if (SelfLoopDef == &UseInst ||
          !dominates(PosIndexes, *SelfLoopDef, UseInst)) {
        // A use at or before the first def is reached around the back-edge,
        // so the value is live out on that edge.
        MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
        return true;
      }
    }
  }

  return false;
}
544
545/// Returns false if \p VirtReg is known to not be live into the current block.
546bool RegAllocFastImpl::mayLiveIn(Register VirtReg) {
547 if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex()))
548 return !MBB->pred_empty();
549
550 // See if the first \p Limit def of the register are all in the current block.
551 static const unsigned Limit = 8;
552 unsigned C = 0;
553 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
554 if (DefInst.getParent() != MBB || ++C >= Limit) {
555 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
556 return !MBB->pred_empty();
557 }
558 }
559
560 return false;
561}
562
563/// Insert spill instruction for \p AssignedReg before \p Before. Update
564/// DBG_VALUEs with \p VirtReg operands with the stack slot.
565void RegAllocFastImpl::spill(MachineBasicBlock::iterator Before,
566 Register VirtReg, MCPhysReg AssignedReg, bool Kill,
567 bool LiveOut) {
568 LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " in "
569 << printReg(AssignedReg, TRI));
570 int FI = getStackSpaceFor(VirtReg);
571 LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
572
573 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
574 TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
575 VirtReg);
576 ++NumStores;
577
579
580 // When we spill a virtual register, we will have spill instructions behind
581 // every definition of it, meaning we can switch all the DBG_VALUEs over
582 // to just reference the stack slot.
583 SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
585 SpilledOperandsMap;
586 for (MachineOperand *MO : LRIDbgOperands)
587 SpilledOperandsMap[MO->getParent()].push_back(MO);
588 for (const auto &MISpilledOperands : SpilledOperandsMap) {
589 MachineInstr &DBG = *MISpilledOperands.first;
590 // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
591 if (DBG.isDebugValueList())
592 continue;
594 *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
595 assert(NewDV->getParent() == MBB && "dangling parent pointer");
596 (void)NewDV;
597 LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
598
599 if (LiveOut) {
600 // We need to insert a DBG_VALUE at the end of the block if the spill slot
601 // is live out, but there is another use of the value after the
602 // spill. This will allow LiveDebugValues to see the correct live out
603 // value to propagate to the successors.
604 MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
605 MBB->insert(FirstTerm, ClonedDV);
606 LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
607 }
608
609 // Rewrite unassigned dbg_values to use the stack slot.
610 // TODO We can potentially do this for list debug values as well if we know
611 // how the dbg_values are getting unassigned.
612 if (DBG.isNonListDebugValue()) {
613 MachineOperand &MO = DBG.getDebugOperand(0);
614 if (MO.isReg() && MO.getReg() == 0) {
615 updateDbgValueForSpill(DBG, FI, 0);
616 }
617 }
618 }
619 // Now this register is spilled there is should not be any DBG_VALUE
620 // pointing to this register because they are all pointing to spilled value
621 // now.
622 LRIDbgOperands.clear();
623}
624
625/// Insert reload instruction for \p PhysReg before \p Before.
626void RegAllocFastImpl::reload(MachineBasicBlock::iterator Before,
627 Register VirtReg, MCPhysReg PhysReg) {
628 LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
629 << printReg(PhysReg, TRI) << '\n');
630 int FI = getStackSpaceFor(VirtReg);
631 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
632 TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
633 ++NumLoads;
634}
635
636/// Get basic block begin insertion point.
637/// This is not just MBB.begin() because surprisingly we have EH_LABEL
638/// instructions marking the begin of a basic block. This means we must insert
639/// new instructions after such labels...
640MachineBasicBlock::iterator RegAllocFastImpl::getMBBBeginInsertionPoint(
641 MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
643 while (I != MBB.end()) {
644 if (I->isLabel()) {
645 ++I;
646 continue;
647 }
648
649 // Most reloads should be inserted after prolog instructions.
650 if (!TII->isBasicBlockPrologue(*I))
651 break;
652
653 // However if a prolog instruction reads a register that needs to be
654 // reloaded, the reload should be inserted before the prolog.
655 for (MachineOperand &MO : I->operands()) {
656 if (MO.isReg())
657 PrologLiveIns.insert(MO.getReg());
658 }
659
660 ++I;
661 }
662
663 return I;
664}
665
666/// Reload all currently assigned virtual registers.
667void RegAllocFastImpl::reloadAtBegin(MachineBasicBlock &MBB) {
668 if (LiveVirtRegs.empty())
669 return;
670
672 MCRegister Reg = P.PhysReg;
673 // Set state to live-in. This possibly overrides mappings to virtual
674 // registers but we don't care anymore at this point.
675 setPhysRegState(Reg, regLiveIn);
676 }
677
678 SmallSet<Register, 2> PrologLiveIns;
679
680 // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
681 // of spilling here is deterministic, if arbitrary.
682 MachineBasicBlock::iterator InsertBefore =
683 getMBBBeginInsertionPoint(MBB, PrologLiveIns);
684 for (const LiveReg &LR : LiveVirtRegs) {
685 MCPhysReg PhysReg = LR.PhysReg;
686 if (PhysReg == 0 || LR.Error)
687 continue;
688
689 MCRegUnit FirstUnit = *TRI->regunits(PhysReg).begin();
690 if (RegUnitStates[FirstUnit] == regLiveIn)
691 continue;
692
694 "no reload in start block. Missing vreg def?");
695
696 if (PrologLiveIns.count(PhysReg)) {
697 // FIXME: Theoretically this should use an insert point skipping labels
698 // but I'm not sure how labels should interact with prolog instruction
699 // that need reloads.
700 reload(MBB.begin(), LR.VirtReg, PhysReg);
701 } else
702 reload(InsertBefore, LR.VirtReg, PhysReg);
703 }
704 LiveVirtRegs.clear();
705}
706
707/// Handle the direct use of a physical register. Check that the register is
708/// not used by a virtreg. Kill the physreg, marking it free. This may add
709/// implicit kills to MO->getParent() and invalidate MO.
710bool RegAllocFastImpl::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
711 assert(Register::isPhysicalRegister(Reg) && "expected physreg");
712 bool displacedAny = displacePhysReg(MI, Reg);
713 setPhysRegState(Reg, regPreAssigned);
714 markRegUsedInInstr(Reg);
715 return displacedAny;
716}
717
718bool RegAllocFastImpl::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
719 bool displacedAny = displacePhysReg(MI, Reg);
720 setPhysRegState(Reg, regPreAssigned);
721 return displacedAny;
722}
723
724/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
725/// similar to defineVirtReg except the physreg is reserved instead of
726/// allocated.
727bool RegAllocFastImpl::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
728 bool displacedAny = false;
729
730 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
731 switch (unsigned VirtReg = RegUnitStates[Unit]) {
732 default: {
733 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
734 assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
735 MachineBasicBlock::iterator ReloadBefore =
736 std::next((MachineBasicBlock::iterator)MI.getIterator());
737 reload(ReloadBefore, VirtReg, LRI->PhysReg);
738
739 setPhysRegState(LRI->PhysReg, regFree);
740 LRI->PhysReg = 0;
741 LRI->Reloaded = true;
742 displacedAny = true;
743 break;
744 }
745 case regPreAssigned:
746 RegUnitStates[Unit] = regFree;
747 displacedAny = true;
748 break;
749 case regFree:
750 break;
751 }
752 }
753 return displacedAny;
754}
755
756void RegAllocFastImpl::freePhysReg(MCPhysReg PhysReg) {
757 LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
758
759 MCRegUnit FirstUnit = *TRI->regunits(PhysReg).begin();
760 switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
761 case regFree:
762 LLVM_DEBUG(dbgs() << '\n');
763 return;
764 case regPreAssigned:
765 LLVM_DEBUG(dbgs() << '\n');
766 setPhysRegState(PhysReg, regFree);
767 return;
768 default: {
769 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
770 assert(LRI != LiveVirtRegs.end());
771 LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
772 setPhysRegState(LRI->PhysReg, regFree);
773 LRI->PhysReg = 0;
774 }
775 return;
776 }
777}
778
779/// Return the cost of spilling clearing out PhysReg and aliases so it is free
780/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
781/// disabled - it can be allocated directly.
782/// \returns spillImpossible when PhysReg or an alias can't be spilled.
783unsigned RegAllocFastImpl::calcSpillCost(MCPhysReg PhysReg) const {
784 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
785 switch (unsigned VirtReg = RegUnitStates[Unit]) {
786 case regFree:
787 break;
788 case regPreAssigned:
789 LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
790 << printReg(PhysReg, TRI) << '\n');
791 return spillImpossible;
792 default: {
793 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
794 findLiveVirtReg(VirtReg)->LiveOut;
795 return SureSpill ? spillClean : spillDirty;
796 }
797 }
798 }
799 return 0;
800}
801
/// Fix up DBG_VALUEs for \p VirtReg that were seen before the vreg received a
/// physreg: point them at \p Reg if it survives from \p Definition to the
/// DBG_VALUE, otherwise set them to "undef" (register 0).
void RegAllocFastImpl::assignDanglingDebugValues(MachineInstr &Definition,
                                                 Register VirtReg,
                                                 MCPhysReg Reg) {
  auto UDBGValIter = DanglingDbgValues.find(VirtReg);
  if (UDBGValIter == DanglingDbgValues.end())
    return;

  SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second;
  for (MachineInstr *DbgValue : Dangling) {
    assert(DbgValue->isDebugValue());
    if (!DbgValue->hasDebugOperandForReg(VirtReg))
      continue;

    // Test whether the physreg survives from the definition to the DBG_VALUE.
    MCPhysReg SetToReg = Reg;
    // Bounded scan: give up (and mark the value unavailable) after 20
    // instructions to keep this cheap.
    unsigned Limit = 20;
    for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
                                     E = DbgValue->getIterator();
         I != E; ++I) {
      if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
        LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                          << '\n');
        // Register 0 renders the debug operand "undef".
        SetToReg = 0;
        break;
      }
    }
    // Rewrite every operand of the DBG_VALUE that referred to the vreg.
    for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
      MO.setReg(SetToReg);
      if (SetToReg != 0)
        MO.setIsRenamable();
    }
  }
  Dangling.clear();
}
836
837/// This method updates local state so that we know that PhysReg is the
838/// proper container for VirtReg now. The physical register must not be used
839/// for anything else when this is called.
840void RegAllocFastImpl::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
841 MCPhysReg PhysReg) {
842 Register VirtReg = LR.VirtReg;
843 LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
844 << printReg(PhysReg, TRI) << '\n');
845 assert(LR.PhysReg == 0 && "Already assigned a physreg");
846 assert(PhysReg != 0 && "Trying to assign no register");
847 LR.PhysReg = PhysReg;
848 setPhysRegState(PhysReg, VirtReg);
849
850 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
851}
852
853static bool isCoalescable(const MachineInstr &MI) { return MI.isFullCopy(); }
854
855Register RegAllocFastImpl::traceCopyChain(Register Reg) const {
856 static const unsigned ChainLengthLimit = 3;
857 unsigned C = 0;
858 do {
859 if (Reg.isPhysical())
860 return Reg;
861 assert(Reg.isVirtual());
862
863 MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
864 if (!VRegDef || !isCoalescable(*VRegDef))
865 return 0;
866 Reg = VRegDef->getOperand(1).getReg();
867 } while (++C <= ChainLengthLimit);
868 return 0;
869}
870
871/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
872/// chain of copies to check whether we reach a physical register we can
873/// coalesce with.
874Register RegAllocFastImpl::traceCopies(Register VirtReg) const {
875 static const unsigned DefLimit = 3;
876 unsigned C = 0;
877 for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
878 if (isCoalescable(MI)) {
879 Register Reg = MI.getOperand(1).getReg();
880 Reg = traceCopyChain(Reg);
881 if (Reg.isValid())
882 return Reg;
883 }
884
885 if (++C >= DefLimit)
886 break;
887 }
888 return Register();
889}
890
891/// Allocates a physical register for VirtReg.
892void RegAllocFastImpl::allocVirtReg(MachineInstr &MI, LiveReg &LR,
893 Register Hint0, bool LookAtPhysRegUses) {
894 const Register VirtReg = LR.VirtReg;
895 assert(LR.PhysReg == 0);
896
897 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
898 LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
899 << " in class " << TRI->getRegClassName(&RC)
900 << " with hint " << printReg(Hint0, TRI) << '\n');
901
902 // Take hint when possible.
903 if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
904 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
905 // Take hint if the register is currently free.
906 if (isPhysRegFree(Hint0)) {
907 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
908 << '\n');
909 assignVirtToPhysReg(MI, LR, Hint0);
910 return;
911 } else {
912 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
913 << " occupied\n");
914 }
915 } else {
916 Hint0 = Register();
917 }
918
919 // Try other hint.
920 Register Hint1 = traceCopies(VirtReg);
921 if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
922 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
923 // Take hint if the register is currently free.
924 if (isPhysRegFree(Hint1)) {
925 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
926 << '\n');
927 assignVirtToPhysReg(MI, LR, Hint1);
928 return;
929 } else {
930 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
931 << " occupied\n");
932 }
933 } else {
934 Hint1 = Register();
935 }
936
937 MCPhysReg BestReg = 0;
938 unsigned BestCost = spillImpossible;
939 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
940 for (MCPhysReg PhysReg : AllocationOrder) {
941 LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
942 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
943 LLVM_DEBUG(dbgs() << "already used in instr.\n");
944 continue;
945 }
946
947 unsigned Cost = calcSpillCost(PhysReg);
948 LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
949 // Immediate take a register with cost 0.
950 if (Cost == 0) {
951 assignVirtToPhysReg(MI, LR, PhysReg);
952 return;
953 }
954
955 if (PhysReg == Hint0 || PhysReg == Hint1)
956 Cost -= spillPrefBonus;
957
958 if (Cost < BestCost) {
959 BestReg = PhysReg;
960 BestCost = Cost;
961 }
962 }
963
964 if (!BestReg) {
965 // Nothing we can do: Report an error and keep going with an invalid
966 // allocation.
967 LR.PhysReg = getErrorAssignment(LR, MI, RC);
968 LR.Error = true;
969 return;
970 }
971
972 displacePhysReg(MI, BestReg);
973 assignVirtToPhysReg(MI, LR, BestReg);
974}
975
/// Rewrite an undef use of \p VirtReg to a physical register. Since the value
/// is undefined, any register of the class will do; prefer the existing
/// assignment if the vreg is live, otherwise pick the first register in the
/// allocation order without displacing anything.
void RegAllocFastImpl::allocVirtRegUndef(MachineOperand &MO) {
  assert(MO.isUndef() && "expected undef use");
  Register VirtReg = MO.getReg();
  assert(VirtReg.isVirtual() && "Expected virtreg");
  if (!shouldAllocateRegister(VirtReg))
    return;

  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  MCPhysReg PhysReg;
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
    // Reuse the live assignment so `... = OP undef %X, %X` keeps both
    // operands in the same register.
    PhysReg = LRI->PhysReg;
  } else {
    const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
    ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
    if (AllocationOrder.empty()) {
      // All registers in the class were reserved.
      //
      // It might be OK to take any entry from the class as this is an undef
      // use, but accepting this would give different behavior than greedy and
      // basic.
      // NOTE(review): LRI may equal LiveVirtRegs.end() on this path; *LRI /
      // LRI->Error assume a valid entry — confirm an unmapped vreg cannot
      // reach here.
      PhysReg = getErrorAssignment(*LRI, *MO.getParent(), RC);
      LRI->Error = true;
    } else
      PhysReg = AllocationOrder.front();
  }

  // Resolve any subregister index into the concrete physical subregister.
  unsigned SubRegIdx = MO.getSubReg();
  if (SubRegIdx != 0) {
    PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
    MO.setSubReg(0);
  }
  MO.setReg(PhysReg);
  MO.setIsRenamable(true);
}
1010
/// Variation of defineVirtReg() with special handling for livethrough regs
/// (tied or earlyclobber) that may interfere with preassigned uses.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineLiveThroughVirtReg(MachineInstr &MI,
                                                unsigned OpNum,
                                                Register VirtReg) {
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  if (LRI != LiveVirtRegs.end()) {
    MCPhysReg PrevReg = LRI->PhysReg;
    if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
      // The current assignment clashes with a physreg used elsewhere in this
      // instruction: free it, pick a fresh register (LookAtPhysRegUses=true),
      // and insert a COPY after MI to move the value back into the register
      // downstream code (allocated bottom-up) expects.
      LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
                        << " (tied/earlyclobber resolution)\n");
      freePhysReg(PrevReg);
      LRI->PhysReg = 0;
      allocVirtReg(MI, *LRI, 0, true);
      MachineBasicBlock::iterator InsertBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
                        << printReg(PrevReg, TRI) << '\n');
      BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
              TII->get(TargetOpcode::COPY), PrevReg)
          .addReg(LRI->PhysReg, llvm::RegState::Kill);
    }
    MachineOperand &MO = MI.getOperand(OpNum);
    if (MO.getSubReg() && !MO.isUndef()) {
      // A subregister def that is not read-undef reads the remaining lanes,
      // so record this instruction as a use of the value.
      LRI->LastUse = &MI;
    }
  }
  return defineVirtReg(MI, OpNum, VirtReg, true);
}
1043
/// Allocates a register for VirtReg definition. Typically the register is
/// already assigned from a use of the virtreg, however we still need to
/// perform an allocation if:
/// - It is a dead definition without any uses.
/// - The value is live out and all uses are in different basic blocks.
///
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                     Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // First time we see this vreg (block is scanned bottom-up, so "new at a
    // def" means no use was seen below in this block).
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  if (LRI->PhysReg == 0) {
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
  } else {
    assert((!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) || LRI->Error) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  // If the value was reloaded below or is live out of the block, a spill
  // after the def is required so the stack slot holds the defined value.
  if (LRI->Reloaded || LRI->LiveOut) {
    if (!MI.isImplicitDef()) {
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut
                        << " RL: " << LRI->Reloaded << '\n');
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill, FI,
                                     &RC, TRI, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }

      LRI->LastUse = nullptr;
    }
    LRI->LiveOut = false;
    LRI->Reloaded = false;
  }
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    // Record the assignment so handleBundle() can rewrite bundled MIs.
    BundleVirtRegsMap[VirtReg] = PhysReg;
  }
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, PhysReg);
}
1117
/// Allocates a register for a VirtReg use.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::useVirtReg(MachineInstr &MI, MachineOperand &MO,
                                  Register VirtReg) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // First sighting of this vreg in the bottom-up scan, i.e. this is the
    // last use within the block.
    if (!MO.isKill()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a last (killing) use without the kill flag; add the flag now.
        MO.setIsKill(true);
      }
    }
  } else {
    assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
  }

  // If necessary allocate a register.
  if (LRI->PhysReg == 0) {
    assert(!MO.isTied() && "tied op should be allocated");
    Register Hint;
    // For a COPY without subregister, hint the (already-assigned) destination
    // so the copy can later be coalesced away.
    if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
      Hint = MI.getOperand(0).getReg();
      if (Hint.isVirtual()) {
        assert(!shouldAllocateRegister(Hint));
        Hint = Register();
      } else {
        assert(Hint.isPhysical() &&
               "Copy destination should already be assigned");
      }
    }
    allocVirtReg(MI, *LRI, Hint, false);
  }

  LRI->LastUse = &MI;

  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    // Record the assignment so handleBundle() can rewrite bundled MIs.
    BundleVirtRegsMap[VirtReg] = LRI->PhysReg;
  }
  markRegUsedInInstr(LRI->PhysReg);
  return setPhysReg(MI, MO, LRI->PhysReg);
}
1166
1167/// Query a physical register to use as a filler in contexts where the
1168/// allocation has failed. This will raise an error, but not abort the
1169/// compilation.
1170MCPhysReg RegAllocFastImpl::getErrorAssignment(const LiveReg &LR,
1172 const TargetRegisterClass &RC) {
1173 MachineFunction &MF = *MI.getMF();
1174
1175 // Avoid repeating the error every time a register is used.
1176 bool EmitError = !MF.getProperties().hasProperty(
1177 MachineFunctionProperties::Property::FailedRegAlloc);
1178 if (EmitError)
1179 MF.getProperties().set(MachineFunctionProperties::Property::FailedRegAlloc);
1180
1181 // If the allocation order was empty, all registers in the class were
1182 // probably reserved. Fall back to taking the first register in the class,
1183 // even if it's reserved.
1184 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1185 if (AllocationOrder.empty()) {
1186 const Function &Fn = MF.getFunction();
1187 if (EmitError) {
1189 "no registers from class available to allocate", Fn,
1190 MI.getDebugLoc()));
1191 }
1192
1193 ArrayRef<MCPhysReg> RawRegs = RC.getRegisters();
1194 assert(!RawRegs.empty() && "register classes cannot have no registers");
1195 return RawRegs.front();
1196 }
1197
1198 if (!LR.Error && EmitError) {
1199 // Nothing we can do: Report an error and keep going with an invalid
1200 // allocation.
1201 if (MI.isInlineAsm()) {
1202 MI.emitInlineAsmError(
1203 "inline assembly requires more registers than available");
1204 } else {
1205 const Function &Fn = MBB->getParent()->getFunction();
1207 "ran out of registers during register allocation", Fn,
1208 MI.getDebugLoc()));
1209 }
1210 }
1211
1212 return AllocationOrder.front();
1213}
1214
/// Changes operand OpNum in MI the refer the PhysReg, considering subregs.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::setPhysReg(MachineInstr &MI, MachineOperand &MO,
                                  MCPhysReg PhysReg) {
  // Common case: no subregister index, just rewrite the register.
  if (!MO.getSubReg()) {
    MO.setReg(PhysReg);
    MO.setIsRenamable(true);
    return false;
  }

  // Handle subregister index.
  MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : MCRegister());
  MO.setIsRenamable(true);
  // Note: We leave the subreg number around a little longer in case of defs.
  // This is so that the register freeing logic in allocateInstruction can still
  // recognize this as subregister defs. The code there will clear the number.
  if (!MO.isDef())
    MO.setSubReg(0);

  // A kill flag implies killing the full register. Add corresponding super
  // register kill.
  if (MO.isKill()) {
    MI.addRegisterKilled(PhysReg, TRI, true);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }

  // A <def,read-undef> of a sub-register requires an implicit def of the full
  // register.
  if (MO.isDef() && MO.isUndef()) {
    if (MO.isDead())
      MI.addRegisterDead(PhysReg, TRI, true);
    else
      MI.addRegisterDefined(PhysReg, TRI);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }
  return false;
}
1254
1255#ifndef NDEBUG
1256
/// Debug-only dump of the allocator state: prints the virtual register (or
/// pre-assignment marker) held by every register unit, then asserts that the
/// LiveVirtRegs map is the exact inverse of RegUnitStates.
void RegAllocFastImpl::dumpState() const {
  for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
       ++Unit) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regPreAssigned:
      dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
      break;
    case regLiveIn:
      llvm_unreachable("Should not have regLiveIn in map");
    default: {
      dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
      LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
      assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
      // [O] = live out of the block, [R] = reloaded somewhere below.
      if (I->LiveOut || I->Reloaded) {
        dbgs() << '[';
        if (I->LiveOut)
          dbgs() << 'O';
        if (I->Reloaded)
          dbgs() << 'R';
        dbgs() << ']';
      }
      assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
      break;
    }
    }
  }
  dbgs() << '\n';
  // Check that LiveVirtRegs is the inverse.
  for (const LiveReg &LR : LiveVirtRegs) {
    Register VirtReg = LR.VirtReg;
    assert(VirtReg.isVirtual() && "Bad map key");
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg != 0) {
      assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg");
      for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
        assert(RegUnitStates[Unit] == VirtReg && "inverse map valid");
      }
    }
  }
}
1299#endif
1300
1301/// Count number of defs consumed from each register class by \p Reg
1302void RegAllocFastImpl::addRegClassDefCounts(
1303 MutableArrayRef<unsigned> RegClassDefCounts, Register Reg) const {
1304 assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
1305
1306 if (Reg.isVirtual()) {
1307 if (!shouldAllocateRegister(Reg))
1308 return;
1309 const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
1310 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1311 RCIdx != RCIdxEnd; ++RCIdx) {
1312 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1313 // FIXME: Consider aliasing sub/super registers.
1314 if (OpRC->hasSubClassEq(IdxRC))
1315 ++RegClassDefCounts[RCIdx];
1316 }
1317
1318 return;
1319 }
1320
1321 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1322 RCIdx != RCIdxEnd; ++RCIdx) {
1323 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1324 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
1325 if (IdxRC->contains(*Alias)) {
1326 ++RegClassDefCounts[RCIdx];
1327 break;
1328 }
1329 }
1330 }
1331}
1332
/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
/// that are to be allocated. Those are ordered in a way that small classes,
/// early clobbers and livethroughs are allocated first.
void RegAllocFastImpl::findAndSortDefOperandIndexes(const MachineInstr &MI) {
  DefOperandIndexes.clear();

  LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (MO.readsReg()) {
      // Mark physreg uses so def allocation below avoids them.
      if (Reg.isPhysical()) {
        LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
        markPhysRegUsedInInstr(Reg);
      }
    }

    if (MO.isDef() && Reg.isVirtual() && shouldAllocateRegister(Reg))
      DefOperandIndexes.push_back(I);
  }

  // Most instructions only have one virtual def, so there's no point in
  // computing the possible number of defs for every register class.
  if (DefOperandIndexes.size() <= 1)
    return;

  // Track number of defs which may consume a register from the class. This is
  // used to assign registers for possibly-too-small classes first. Example:
  // defs are eax, 3 * gr32_abcd, 2 * gr32 => we want to assign the gr32_abcd
  // registers first so that the gr32 don't use the gr32_abcd registers before
  // we assign these.
  SmallVector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);

  for (const MachineOperand &MO : MI.all_defs())
    addRegClassDefCounts(RegClassDefCounts, MO.getReg());

  llvm::sort(DefOperandIndexes, [&](unsigned I0, unsigned I1) {
    const MachineOperand &MO0 = MI.getOperand(I0);
    const MachineOperand &MO1 = MI.getOperand(I1);
    Register Reg0 = MO0.getReg();
    Register Reg1 = MO1.getReg();
    const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
    const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);

    // Identify regclass that are easy to use up completely just in this
    // instruction.
    unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
    unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();

    bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
    bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
    if (SmallClass0 > SmallClass1)
      return true;
    if (SmallClass0 < SmallClass1)
      return false;

    // Allocate early clobbers and livethrough operands first.
    bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
                        (MO0.getSubReg() == 0 && !MO0.isUndef());
    bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
                        (MO1.getSubReg() == 0 && !MO1.isUndef());
    if (Livethrough0 > Livethrough1)
      return true;
    if (Livethrough0 < Livethrough1)
      return false;

    // Tie-break rule: operand index.
    return I0 < I1;
  });
}
1405
1406// Returns true if MO is tied and the operand it's tied to is not Undef (not
1407// Undef is not the same thing as Def).
1408static bool isTiedToNotUndef(const MachineOperand &MO) {
1409 if (!MO.isTied())
1410 return false;
1411 const MachineInstr &MI = *MO.getParent();
1412 unsigned TiedIdx = MI.findTiedOperandIdx(MI.getOperandNo(&MO));
1413 const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
1414 return !TiedMO.isUndef();
1415}
1416
/// Allocate all virtual register operands of \p MI, inserting spills and
/// reloads as needed. Runs as part of the bottom-up scan of the block.
void RegAllocFastImpl::allocateInstruction(MachineInstr &MI) {
  // The basic algorithm here is:
  // 1. Mark registers of def operands as free
  // 2. Allocate registers to use operands and place reload instructions for
  //    registers displaced by the allocation.
  //
  // However we need to handle some corner cases:
  // - pre-assigned defs and uses need to be handled before the other def/use
  //   operands are processed to avoid the allocation heuristics clashing with
  //   the pre-assignment.
  // - The "free def operands" step has to come last instead of first for tied
  //   operands and early-clobbers.

  // Bump the generation counter instead of clearing UsedInInstr each time.
  InstrGen += 2;
  // In the event we ever get more than 2**31 instructions...
  if (LLVM_UNLIKELY(InstrGen == 0)) {
    UsedInInstr.assign(UsedInInstr.size(), 0);
    InstrGen = 2;
  }
  RegMasks.clear();
  BundleVirtRegsMap.clear();

  // Scan for special cases; Apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          if (isTiedToNotUndef(MO) || (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            // If the def displaced nothing, the value is unused below.
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister defs:
      // Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (unsigned OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            Register Reg = MO.getReg();
            if (MO.isEarlyClobber() || isTiedToNotUndef(MO) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            // Implicit operands of MI were re-arranged,
            // re-compute DefOperandIndexes.
            if (ReArrangedImplicitOps)
              break;
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (MachineOperand &MO : MI.all_defs()) {
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps =
                  defineVirtReg(MI, MI.getOperandNo(&MO), Reg);
              if (ReArrangedImplicitOps)
                break;
            }
          }
        }
      }
    }

    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      Register Reg = MO.getReg();

      // subreg defs don't free the full register. We left the subreg number
      // around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if (isTiedToNotUndef(MO) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical());
      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }

  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      if (!usePhysReg(MI, Reg))
        MO.setIsKill(true);
    }
  }

  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      if (MO.isUndef()) {
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, MO, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assign
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //    early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
  // Identity copies are queued for deletion after the block is finished.
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
1676
1677void RegAllocFastImpl::handleDebugValue(MachineInstr &MI) {
1678 // Ignore DBG_VALUEs that aren't based on virtual registers. These are
1679 // mostly constants and frame indices.
1680 assert(MI.isDebugValue() && "not a DBG_VALUE*");
1681 for (const auto &MO : MI.debug_operands()) {
1682 if (!MO.isReg())
1683 continue;
1684 Register Reg = MO.getReg();
1685 if (!Reg.isVirtual())
1686 continue;
1687 if (!shouldAllocateRegister(Reg))
1688 continue;
1689
1690 // Already spilled to a stackslot?
1691 int SS = StackSlotForVirtReg[Reg];
1692 if (SS != -1) {
1693 // Modify DBG_VALUE now that the value is in a spill slot.
1694 updateDbgValueForSpill(MI, SS, Reg);
1695 LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
1696 continue;
1697 }
1698
1699 // See if this virtual register has already been allocated to a physical
1700 // register or spilled to a stack slot.
1701 LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
1703 for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg))
1704 DbgOps.push_back(&Op);
1705
1706 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1707 // Update every use of Reg within MI.
1708 for (auto &RegMO : DbgOps)
1709 setPhysReg(MI, *RegMO, LRI->PhysReg);
1710 } else {
1711 DanglingDbgValues[Reg].push_back(&MI);
1712 }
1713
1714 // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
1715 // that future spills of Reg will have DBG_VALUEs.
1716 LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
1717 }
1718}
1719
1720void RegAllocFastImpl::handleBundle(MachineInstr &MI) {
1721 MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
1722 ++BundledMI;
1723 while (BundledMI->isBundledWithPred()) {
1724 for (MachineOperand &MO : BundledMI->operands()) {
1725 if (!MO.isReg())
1726 continue;
1727
1728 Register Reg = MO.getReg();
1729 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1730 continue;
1731
1733 DI = BundleVirtRegsMap.find(Reg);
1734 assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
1735
1736 setPhysReg(MI, MO, DI->second);
1737 }
1738
1739 ++BundledMI;
1740 }
1741}
1742
/// Allocate all instructions of \p MBB, scanning the block bottom-up so that
/// uses are seen before their defs. Reloads for live-in values are placed at
/// the block entry afterwards.
void RegAllocFastImpl::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  PosIndexes.unsetInitialized();
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  // Registers live at block exit must not be reused for vregs.
  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse block in reverse order allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once BUNDLE header is assigned registers, same assignments need to be
    // done for bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(dbgs() << "Begin Regs:"; dumpState());

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  // DBG_VALUEs whose vreg never got a location in this block become undef.
  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}
1803
/// Run the fast register allocator over every basic block of \p MF.
/// \return true — the function is always modified.
bool RegAllocFastImpl::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  // Cache target/function state used throughout allocation.
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs();
  RegClassInfo.runOnMachineFunction(MF);
  unsigned NumRegUnits = TRI->getNumRegUnits();
  // InstrGen 0 forces a full UsedInInstr reset on the first instruction.
  InstrGen = 0;
  UsedInInstr.assign(NumRegUnits, 0);

  // initialize the virtual->physical register map to have a 'null'
  // mapping for all virtual registers
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register references
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  if (ClearVirtRegs) {
    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers.
    MRI->clearVirtRegs();
  }

  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}
1840
1843 MFPropsModifier _(*this, MF);
1844 RegAllocFastImpl Impl(Opts.Filter, Opts.ClearVRegs);
1845 bool Changed = Impl.runOnMachineFunction(MF);
1846 if (!Changed)
1847 return PreservedAnalyses::all();
1849 PA.preserveSet<CFGAnalyses>();
1850 return PA;
1851}
1852
1854 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1855 bool PrintFilterName = Opts.FilterName != "all";
1856 bool PrintNoClearVRegs = !Opts.ClearVRegs;
1857 bool PrintSemicolon = PrintFilterName && PrintNoClearVRegs;
1858
1859 OS << "regallocfast";
1860 if (PrintFilterName || PrintNoClearVRegs) {
1861 OS << '<';
1862 if (PrintFilterName)
1863 OS << "filter=" << Opts.FilterName;
1864 if (PrintSemicolon)
1865 OS << ';';
1866 if (PrintNoClearVRegs)
1867 OS << "no-clear-vregs";
1868 OS << '>';
1869 }
1870}
1871
1872FunctionPass *llvm::createFastRegisterAllocator() { return new RegAllocFast(); }
1873
1875 bool ClearVirtRegs) {
1876 return new RegAllocFast(Ftor, ClearVirtRegs);
1877}
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
#define LLVM_UNLIKELY(EXPR)
Definition: Compiler.h:320
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file defines the DenseMap class.
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
This file implements an indexed map.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
#define P(N)
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A, const MachineInstr &B)
static bool isTiedToNotUndef(const MachineOperand &MO)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
Class recording the (high level) value of a variable.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
Represent the analysis usage information of a pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:256
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & front() const
front - Get the first element.
Definition: ArrayRef.h:171
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
bool test(unsigned Idx) const
Definition: BitVector.h:461
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
BitVector & set()
Definition: BitVector.h:351
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
iterator end()
Definition: DenseMap.h:84
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Load the specified register of the given register class from the specified stack frame index.
void resize(typename StorageT::size_type s)
Definition: IndexedMap.h:61
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
An RAII based helper class to modify MachineFunctionProperties when running pass.
iterator_range< liveout_iterator > liveouts() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual MachineFunctionProperties getClearedProperties() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual MachineFunctionProperties getSetProperties() const
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:71
bool isDebugValueList() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:349
bool isNonListDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:596
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:587
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:310
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition: Register.h:99
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:175
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:704
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
SparseSet - Fast set implementation for objects that can be identified by small unsigned keys.
Definition: SparseSet.h:124
typename DenseT::iterator iterator
Definition: SparseSet.h:175
typename DenseT::const_iterator const_iterator
Definition: SparseSet.h:176
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
TargetInstrInfo - Interface to description of machine instruction set.
ArrayRef< MCPhysReg > getRegisters() const
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Kill
The last use of a register.
@ SS
Definition: X86.h:212
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:21
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Pair of physical register and lane mask.
RegAllocFilterFunc Filter
Definition: RegAllocFast.h:18
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254